From 5a19b00fa5da7184b934d7b3f8318eb42e5dfcb7 Mon Sep 17 00:00:00 2001
From: harvest
Date: Mon, 16 Sep 2024 23:54:59 -0500
Subject: [PATCH] Deployed 9df9fa06 to nightly with MkDocs 1.5.3 and mike 1.1.2

---
 nightly/ontap-metrics/index.html | 36260 +++++++++++++++++------------
 nightly/plugins/index.html       |   135 +
 nightly/search/search_index.json |     2 +-
 nightly/sitemap.xml.gz           |   Bin 127 -> 127 bytes
 4 files changed, 21082 insertions(+), 15315 deletions(-)

diff --git a/nightly/ontap-metrics/index.html b/nightly/ontap-metrics/index.html
index bce5cc60f..34ae950a9 100644
--- a/nightly/ontap-metrics/index.html
+++ b/nightly/ontap-metrics/index.html
@@ -11866,13046 +11866,16719 @@
[Navigation sidebar diff omitted: the regenerated MkDocs table of contents adds entries for the new svm_ontaps3_svm_* counters (svm_ontaps3_svm_abort_multipart_upload_failed through svm_ontaps3_svm_upload_part_total) and re-emits the existing svm_vol_*, svm_vscan_*, token_*, volume_*, vscan_*, and wafl_* entries, together with the Power Algorithm, Plugins, REST Perf Metrics, REST Strategy, Templates And Metrics, and ZAPI and REST gaps links, with updated anchors. The previous page body (Creation Date: 2024-Aug-12, ONTAP Version 9.15.1) is removed; the updated body follows.]

    ONTAP Metrics

    + +

    This document describes how Harvest metrics relate to their relevant ONTAP ZAPI and REST mappings, including:

• Details about which Harvest metrics each dashboard uses. These can be generated on demand by running bin/harvest grafana metrics. See #1577 for details.
• More information about ONTAP REST performance counters can be found here.

Creation Date: 2024-Sep-17
ONTAP Version: 9.15.1
    +
    +

    Understanding the structure

    +

    Below is an annotated example of how to interpret the structure of each of the metrics.

    +

    disk_io_queued Name of the metric exported by Harvest

    +

    Number of I/Os queued to the disk but not yet issued Description of the ONTAP metric

    +
• API will be one of REST or ZAPI depending on which collector is used to collect the metric
• Endpoint name of the REST or ZAPI API used to collect this metric
• Metric name of the ONTAP metric
• Template path of the template that collects the metric
    +

    Performance related metrics also include:

    +
• Unit: the unit of the metric
• Type: describes how to calculate a cooked metric from two consecutive ONTAP raw metrics (see the sketch after the example table below)
• Base: some counters require a base counter for post-processing. When required, this property lists the base counter
    +
| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | io_queued<br>Unit: none<br>Type: average<br>Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | io_queued<br>Unit: none<br>Type: average<br>Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +
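To make the Type and Base annotations concrete, here is a minimal sketch of how a cooked value can be derived from two consecutive raw samples. It is illustrative only, not Harvest's implementation; the Raw struct, its field names, and the zero-delta guard are assumptions.

```go
package main

import "fmt"

// Raw holds one poll of an ONTAP counter together with its base counter
// and the poll time. The shape is illustrative, not Harvest's data model.
type Raw struct {
	Value     float64 // raw counter value, e.g. io_queued
	Base      float64 // raw base counter, e.g. base_for_disk_busy
	Timestamp float64 // poll time in seconds
}

// rate divides the counter delta by elapsed time (Type: rate, e.g. per_sec counters).
func rate(prev, cur Raw) float64 {
	return (cur.Value - prev.Value) / (cur.Timestamp - prev.Timestamp)
}

// average divides the counter delta by the base-counter delta
// (Type: average, e.g. a latency counter over an ops base).
func average(prev, cur Raw) float64 {
	dBase := cur.Base - prev.Base
	if dBase == 0 {
		return 0 // avoid dividing by zero when the base did not move
	}
	return (cur.Value - prev.Value) / dBase
}

// percent is an average scaled to 0-100 (Type: percent,
// e.g. disk_busy_percent over base_for_disk_busy).
func percent(prev, cur Raw) float64 {
	return 100 * average(prev, cur)
}

func main() {
	prev := Raw{Value: 1000, Base: 500, Timestamp: 0}
	cur := Raw{Value: 1150, Base: 800, Timestamp: 60}
	fmt.Printf("rate=%.2f/s average=%.2f percent=%.1f%%\n",
		rate(prev, cur), average(prev, cur), percent(prev, cur))
}
```

Raw ONTAP performance counters are cumulative, so only the difference between two polls is meaningful; for counters whose Type is raw (for example the capacity metrics below), the value is presumably exported without this delta post-processing.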

    Metrics

    +

    aggr_disk_busy

    +

    The utilization percent of the disk. aggr_disk_busy is disk_busy aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | disk_busy_percent<br>Unit: percent<br>Type: percent<br>Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | disk_busy<br>Unit: percent<br>Type: percent<br>Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +
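Per the description, the aggr_disk_* series are not separate ONTAP counters; they are the per-disk disk_* counters rolled up by the aggr label, and the aggr_disk_max_* variants further down are the per-aggr maximum. A minimal sketch of that grouping idea follows; the DiskSample type is a made-up illustration, and the mean is only one plausible reading of "aggregated by aggr", while the max mirrors what the aggr_disk_max_* descriptions state.

```go
package main

import "fmt"

// DiskSample is one disk_busy observation labelled with its containing aggregate.
// This is an illustrative shape, not Harvest's internal data model.
type DiskSample struct {
	Aggr string
	Busy float64
}

// rollUp groups per-disk values by the aggr label and keeps both a mean
// (one plausible reading of "aggregated by aggr") and the maximum
// (what the docs state for the aggr_disk_max_* variants).
func rollUp(samples []DiskSample) (mean, max map[string]float64) {
	sum := map[string]float64{}
	count := map[string]float64{}
	max = map[string]float64{}
	for _, s := range samples {
		sum[s.Aggr] += s.Busy
		count[s.Aggr]++
		if s.Busy > max[s.Aggr] {
			max[s.Aggr] = s.Busy
		}
	}
	mean = map[string]float64{}
	for aggr, total := range sum {
		mean[aggr] = total / count[aggr]
	}
	return mean, max
}

func main() {
	samples := []DiskSample{
		{Aggr: "aggr1", Busy: 12.0},
		{Aggr: "aggr1", Busy: 48.0},
		{Aggr: "aggr2", Busy: 30.0},
	}
	mean, max := rollUp(samples)
	fmt.Println("aggr_disk_busy-like mean:", mean) // aggr1: 30, aggr2: 30
	fmt.Println("aggr_disk_max_busy-like max:", max)
}
```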

    aggr_disk_capacity

    +

    Disk capacity in MB. aggr_disk_capacity is disk_capacity aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | capacity<br>Unit: mb<br>Type: raw<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | disk_capacity<br>Unit: mb<br>Type: raw<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_cp_read_chain

    +

    Average number of blocks transferred in each consistency point read operation during a CP. aggr_disk_cp_read_chain is disk_cp_read_chain aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | cp_read_chain<br>Unit: none<br>Type: average<br>Base: cp_read_count | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | cp_read_chain<br>Unit: none<br>Type: average<br>Base: cp_reads | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_cp_read_latency

    +

    Average latency per block in microseconds for consistency point read operations. aggr_disk_cp_read_latency is disk_cp_read_latency aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | cp_read_latency<br>Unit: microsec<br>Type: average<br>Base: cp_read_blocks | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | cp_read_latency<br>Unit: microsec<br>Type: average<br>Base: cp_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_cp_reads

    +

    Number of disk read operations initiated each second for consistency point processing. aggr_disk_cp_reads is disk_cp_reads aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | cp_read_count<br>Unit: per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | cp_reads<br>Unit: per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_io_pending

    +

    Average number of I/Os issued to the disk for which we have not yet received the response. aggr_disk_io_pending is disk_io_pending aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | io_pending<br>Unit: none<br>Type: average<br>Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | io_pending<br>Unit: none<br>Type: average<br>Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_io_queued

    +

    Number of I/Os queued to the disk but not yet issued. aggr_disk_io_queued is disk_io_queued aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | io_queued<br>Unit: none<br>Type: average<br>Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | io_queued<br>Unit: none<br>Type: average<br>Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_busy

    +

    The utilization percent of the disk. aggr_disk_max_busy is the maximum of disk_busy for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | disk_busy_percent<br>Unit: percent<br>Type: percent<br>Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | disk_busy<br>Unit: percent<br>Type: percent<br>Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_capacity

    +

    Disk capacity in MB. aggr_disk_max_capacity is the maximum of disk_capacity for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | capacity<br>Unit: mb<br>Type: raw<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | disk_capacity<br>Unit: mb<br>Type: raw<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_cp_read_chain

    +

    Average number of blocks transferred in each consistency point read operation during a CP. aggr_disk_max_cp_read_chain is the maximum of disk_cp_read_chain for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | cp_read_chain<br>Unit: none<br>Type: average<br>Base: cp_read_count | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | cp_read_chain<br>Unit: none<br>Type: average<br>Base: cp_reads | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_cp_read_latency

    +

    Average latency per block in microseconds for consistency point read operations. aggr_disk_max_cp_read_latency is the maximum of disk_cp_read_latency for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | cp_read_latency<br>Unit: microsec<br>Type: average<br>Base: cp_read_blocks | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | cp_read_latency<br>Unit: microsec<br>Type: average<br>Base: cp_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_cp_reads

    +

    Number of disk read operations initiated each second for consistency point processing. aggr_disk_max_cp_reads is the maximum of disk_cp_reads for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | cp_read_count<br>Unit: per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | cp_reads<br>Unit: per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_io_pending

    +

    Average number of I/Os issued to the disk for which we have not yet received the response. aggr_disk_max_io_pending is the maximum of disk_io_pending for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | io_pending<br>Unit: none<br>Type: average<br>Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | io_pending<br>Unit: none<br>Type: average<br>Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_io_queued

    +

    Number of I/Os queued to the disk but not yet issued. aggr_disk_max_io_queued is the maximum of disk_io_queued for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | io_queued<br>Unit: none<br>Type: average<br>Base: base_for_disk_busy | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | io_queued<br>Unit: none<br>Type: average<br>Base: base_for_disk_busy | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_total_data

    +

    Total throughput for user operations per second. aggr_disk_max_total_data is the maximum of disk_total_data for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | total_data<br>Unit: b_per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | total_data<br>Unit: b_per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_total_transfers

    +

    Total number of disk operations involving data transfer initiated per second. aggr_disk_max_total_transfers is the maximum of disk_total_transfers for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | total_transfer_count<br>Unit: per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | total_transfers<br>Unit: per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_user_read_blocks

    +

    Number of blocks transferred for user read operations per second. aggr_disk_max_user_read_blocks is the maximum of disk_user_read_blocks for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_read_block_count<br>Unit: per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_read_blocks<br>Unit: per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_user_read_chain

    +

    Average number of blocks transferred in each user read operation. aggr_disk_max_user_read_chain is the maximum of disk_user_read_chain for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_read_chain<br>Unit: none<br>Type: average<br>Base: user_read_count | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_read_chain<br>Unit: none<br>Type: average<br>Base: user_reads | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_user_read_latency

    +

    Average latency per block in microseconds for user read operations. aggr_disk_max_user_read_latency is the maximum of disk_user_read_latency for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_read_latency<br>Unit: microsec<br>Type: average<br>Base: user_read_block_count | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_read_latency<br>Unit: microsec<br>Type: average<br>Base: user_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_user_reads

    +

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. aggr_disk_max_user_reads is the maximum of disk_user_reads for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_read_count<br>Unit: per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_reads<br>Unit: per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_user_write_blocks

    +

    Number of blocks transferred for user write operations per second. aggr_disk_max_user_write_blocks is the maximum of disk_user_write_blocks for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_write_block_count<br>Unit: per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_write_blocks<br>Unit: per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_user_write_chain

    +

    Average number of blocks transferred in each user write operation. aggr_disk_max_user_write_chain is the maximum of disk_user_write_chain for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_write_chain<br>Unit: none<br>Type: average<br>Base: user_write_count | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_write_chain<br>Unit: none<br>Type: average<br>Base: user_writes | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_user_write_latency

    +

    Average latency per block in microseconds for user write operations. aggr_disk_max_user_write_latency is the maximum of disk_user_write_latency for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_write_latency<br>Unit: microsec<br>Type: average<br>Base: user_write_block_count | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_write_latency<br>Unit: microsec<br>Type: average<br>Base: user_write_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_max_user_writes

    +

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. aggr_disk_max_user_writes is the maximum of disk_user_writes for label aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_write_count<br>Unit: per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_writes<br>Unit: per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_total_data

    +

    Total throughput for user operations per second. aggr_disk_total_data is disk_total_data aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | total_data<br>Unit: b_per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | total_data<br>Unit: b_per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_total_transfers

    +

    Total number of disk operations involving data transfer initiated per second. aggr_disk_total_transfers is disk_total_transfers aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | total_transfer_count<br>Unit: per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | total_transfers<br>Unit: per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_user_read_blocks

    +

    Number of blocks transferred for user read operations per second. aggr_disk_user_read_blocks is disk_user_read_blocks aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_read_block_count<br>Unit: per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_read_blocks<br>Unit: per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_user_read_chain

    +

    Average number of blocks transferred in each user read operation. aggr_disk_user_read_chain is disk_user_read_chain aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_read_chain<br>Unit: none<br>Type: average<br>Base: user_read_count | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_read_chain<br>Unit: none<br>Type: average<br>Base: user_reads | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_user_read_latency

    +

    Average latency per block in microseconds for user read operations. aggr_disk_user_read_latency is disk_user_read_latency aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_read_latency<br>Unit: microsec<br>Type: average<br>Base: user_read_block_count | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_read_latency<br>Unit: microsec<br>Type: average<br>Base: user_read_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_user_reads

    +

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. aggr_disk_user_reads is disk_user_reads aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_read_count<br>Unit: per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_reads<br>Unit: per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_user_write_blocks

    +

    Number of blocks transferred for user write operations per second. aggr_disk_user_write_blocks is disk_user_write_blocks aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_write_block_count<br>Unit: per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_write_blocks<br>Unit: per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_user_write_chain

    +

    Average number of blocks transferred in each user write operation. aggr_disk_user_write_chain is disk_user_write_chain aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_write_chain<br>Unit: none<br>Type: average<br>Base: user_write_count | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_write_chain<br>Unit: none<br>Type: average<br>Base: user_writes | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_user_write_latency

    +

    Average latency per block in microseconds for user write operations. aggr_disk_user_write_latency is disk_user_write_latency aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_write_latency<br>Unit: microsec<br>Type: average<br>Base: user_write_block_count | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_write_latency<br>Unit: microsec<br>Type: average<br>Base: user_write_blocks | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_disk_user_writes

    +

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. aggr_disk_user_writes is disk_user_writes aggregated by aggr.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/cluster/counter/tables/disk:constituent | user_write_count<br>Unit: per_sec<br>Type: rate<br>Base: | conf/restperf/9.12.0/disk.yaml |
| ZAPI | perf-object-get-instances disk:constituent | user_writes<br>Unit: per_sec<br>Type: rate<br>Base: | conf/zapiperf/cdot/9.8.0/disk.yaml |
    +

    aggr_efficiency_savings

    +

    Space saved by storage efficiencies (logical_used - used)

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | space.efficiency.savings | conf/rest/9.12.0/aggr.yaml |
    +
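As a quick worked example of the definition above (numbers are made up): an aggregate reporting logical_used = 10 TiB and used = 4 TiB would export aggr_efficiency_savings = 10 TiB - 4 TiB = 6 TiB.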

    aggr_efficiency_savings_wo_snapshots

    +

    Space saved by storage efficiencies (logical_used - used)

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | space.efficiency_without_snapshots.savings | conf/rest/9.12.0/aggr.yaml |
    +

    aggr_efficiency_savings_wo_snapshots_flexclones

    +

    Space saved by storage efficiencies (logical_used - used)

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | space.efficiency_without_snapshots_flexclones.savings | conf/rest/9.12.0/aggr.yaml |
    +

    aggr_hybrid_cache_size_total

    +

    Total usable space in bytes of SSD cache. Only provided when hybrid_cache.enabled is 'true'.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | block_storage.hybrid_cache.size | conf/rest/9.12.0/aggr.yaml |
| ZAPI | aggr-get-iter | aggr-attributes.aggr-space-attributes.hybrid-cache-size-total | conf/zapi/cdot/9.8.0/aggr.yaml |
    +

    aggr_hybrid_disk_count

    +

    Number of disks used in the cache tier of the aggregate. Only provided when hybrid_cache.enabled is 'true'.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | block_storage.hybrid_cache.disk_count | conf/rest/9.12.0/aggr.yaml |
    +

    aggr_inode_files_private_used

    +

Number of system metadata files used. If the referenced file system is restricted or offline, a value of 0 is returned. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | inode_attributes.files_private_used | conf/rest/9.12.0/aggr.yaml |
| ZAPI | aggr-get-iter | aggr-attributes.aggr-inode-attributes.files-private-used | conf/zapi/cdot/9.8.0/aggr.yaml |
    +

    aggr_inode_files_total

    +

    Maximum number of user-visible files that this referenced file system can currently hold. If the referenced file system is restricted or offline, a value of 0 is returned.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | inode_attributes.files_total | conf/rest/9.12.0/aggr.yaml |
| ZAPI | aggr-get-iter | aggr-attributes.aggr-inode-attributes.files-total | conf/zapi/cdot/9.8.0/aggr.yaml |
    +

    aggr_inode_files_used

    +

    Number of user-visible files used in the referenced file system. If the referenced file system is restricted or offline, a value of 0 is returned.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | inode_attributes.files_used | conf/rest/9.12.0/aggr.yaml |
| ZAPI | aggr-get-iter | aggr-attributes.aggr-inode-attributes.files-used | conf/zapi/cdot/9.8.0/aggr.yaml |
    +

    aggr_inode_inodefile_private_capacity

    +

Number of files that can currently be stored on disk for system metadata files. This number will dynamically increase as more system files are created. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | inode_attributes.file_private_capacity | conf/rest/9.12.0/aggr.yaml |
| ZAPI | aggr-get-iter | aggr-attributes.aggr-inode-attributes.inodefile-private-capacity | conf/zapi/cdot/9.8.0/aggr.yaml |
    +

    aggr_inode_inodefile_public_capacity

    +

Number of files that can currently be stored on disk for user-visible files. This number will dynamically increase as more user-visible files are created. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | inode_attributes.file_public_capacity | conf/rest/9.12.0/aggr.yaml |
| ZAPI | aggr-get-iter | aggr-attributes.aggr-inode-attributes.inodefile-public-capacity | conf/zapi/cdot/9.8.0/aggr.yaml |
    +

    aggr_inode_maxfiles_available

    +

    The count of the maximum number of user-visible files currently allowable on the referenced file system.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | inode_attributes.max_files_available | conf/rest/9.12.0/aggr.yaml |
| ZAPI | aggr-get-iter | aggr-attributes.aggr-inode-attributes.maxfiles-available | conf/zapi/cdot/9.8.0/aggr.yaml |
    +

    aggr_inode_maxfiles_possible

    +

    The largest value to which the maxfiles-available parameter can be increased by reconfiguration, on the referenced file system.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | inode_attributes.max_files_possible | conf/rest/9.12.0/aggr.yaml |
| ZAPI | aggr-get-iter | aggr-attributes.aggr-inode-attributes.maxfiles-possible | conf/zapi/cdot/9.8.0/aggr.yaml |
    +

    aggr_inode_maxfiles_used

    +

    The number of user-visible files currently in use on the referenced file system.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | inode_attributes.max_files_used | conf/rest/9.12.0/aggr.yaml |
| ZAPI | aggr-get-iter | aggr-attributes.aggr-inode-attributes.maxfiles-used | conf/zapi/cdot/9.8.0/aggr.yaml |
    +

    aggr_inode_used_percent

    +

    The percentage of disk space currently in use based on user-visible file count on the referenced file system.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | inode_attributes.used_percent | conf/rest/9.12.0/aggr.yaml |
| ZAPI | aggr-get-iter | aggr-attributes.aggr-inode-attributes.percent-inode-used-capacity | conf/zapi/cdot/9.8.0/aggr.yaml |
    +

    aggr_logical_used_wo_snapshots

    +

    Logical used

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | space.efficiency_without_snapshots.logical_used | conf/rest/9.12.0/aggr.yaml |
| ZAPI | aggr-efficiency-get-iter | aggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-logical-used-wo-snapshots | conf/zapi/cdot/9.9.0/aggr_efficiency.yaml |
    +

    aggr_logical_used_wo_snapshots_flexclones

    +

    Logical used

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | space.efficiency_without_snapshots_flexclones.logical_used | conf/rest/9.12.0/aggr.yaml |
| ZAPI | aggr-efficiency-get-iter | aggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-logical-used-wo-snapshots-flexclones | conf/zapi/cdot/9.9.0/aggr_efficiency.yaml |
    +

    aggr_object_store_logical_used

    +

    Logical space usage of aggregates in the attached object store.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/private/cli/aggr/show-space | object_store_logical_used | conf/rest/9.12.0/aggr.yaml |
    +

    aggr_object_store_physical_used

    +

    Physical space usage of aggregates in the attached object store.

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/private/cli/aggr/show-space | object_store_physical_used | conf/rest/9.12.0/aggr.yaml |
    +

    aggr_physical_used_wo_snapshots

    +

    Total Data Reduction Physical Used Without Snapshots

| API | Endpoint | Metric | Template |
| --- | --- | --- | --- |
| REST | api/storage/aggregates | space.efficiency_without_snapshots.logical_used, space.efficiency_without_snapshots.savings | conf/rest/9.12.0/aggr.yaml |
| ZAPI | aggr-efficiency-get-iter | aggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-physical-used-wo-snapshots | conf/zapi/cdot/9.9.0/aggr_efficiency.yaml |
    +
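The REST row above lists two source fields (logical_used and savings), which suggests the exported physical value is derived by subtracting one from the other; the same two-field pattern recurs in several aggr_* entries below. A hedged sketch of that arithmetic, not Harvest's actual implementation:

```python
# Illustrative only: derive a physical-used figure from the two REST fields
# named in the table (logical_used minus savings). Values are made up.
def physical_used(logical_used_bytes: int, savings_bytes: int) -> int:
    return logical_used_bytes - savings_bytes

print(physical_used(1_500_000_000, 400_000_000))  # 1100000000
```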

    aggr_physical_used_wo_snapshots_flexclones

    +

    Total Data Reduction Physical Used without snapshots and flexclones

API | Endpoint | Metric | Template
REST | api/storage/aggregates | space.efficiency_without_snapshots_flexclones.logical_used, space.efficiency_without_snapshots_flexclones.savings | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-efficiency-get-iter | aggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-physical-used-wo-snapshots-flexclones | conf/zapi/cdot/9.9.0/aggr_efficiency.yaml

    Metrics

    -

    aggr_disk_busy

    -

    The utilization percent of the disk. aggr_disk_busy is disk_busy aggregated by aggr.

    +

    aggr_power

    +

    Power consumed by aggregate in Watts.

API | Endpoint | Metric | Template
REST | NA | Harvest generated | conf/restperf/9.12.0/disk.yaml
ZAPI | NA | Harvest generated | conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_disk_capacity

    -

    Disk capacity in MB. aggr_disk_capacity is disk_capacity aggregated by aggr.

    +

    aggr_primary_disk_count

    +

    Number of disks used in the aggregate. This includes parity disks, but excludes disks in the hybrid cache.

API | Endpoint | Metric | Template
REST | api/storage/aggregates | block_storage.primary.disk_count | conf/rest/9.12.0/aggr.yaml

aggr_raid_disk_count

Number of disks in the aggregate.

API | Endpoint | Metric | Template
REST | api/storage/aggregates | block_storage.primary.disk_count, block_storage.hybrid_cache.disk_count | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-raid-attributes.disk-count | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_cp_read_chain

    -

    Average number of blocks transferred in each consistency point read operation during a CP. aggr_disk_cp_read_chain is disk_cp_read_chain aggregated by aggr.

    +

    aggr_raid_plex_count

    +

    Number of plexes in the aggregate

API | Endpoint | Metric | Template
REST | api/storage/aggregates | block_storage.plexes.# | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-raid-attributes.plex-count | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_cp_read_latency

    -

    Average latency per block in microseconds for consistency point read operations. aggr_disk_cp_read_latency is disk_cp_read_latency aggregated by aggr.

    +

    aggr_raid_size

    +

    Option to specify the maximum number of disks that can be included in a RAID group.

API | Endpoint | Metric | Template
REST | api/storage/aggregates | block_storage.primary.raid_size | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-raid-attributes.raid-size | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_cp_reads

    -

    Number of disk read operations initiated each second for consistency point processing. aggr_disk_cp_reads is disk_cp_reads aggregated by aggr.

    +

    aggr_snapshot_files_total

    +

    Total files allowed in Snapshot copies

API | Endpoint | Metric | Template
REST | api/storage/aggregates | snapshot.files_total | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-snapshot-attributes.files-total | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_io_pending

    -

    Average number of I/Os issued to the disk for which we have not yet received the response. aggr_disk_io_pending is disk_io_pending aggregated by aggr.

    +

    aggr_snapshot_files_used

    +

    Total files created in Snapshot copies

API | Endpoint | Metric | Template
REST | api/storage/aggregates | snapshot.files_used | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-snapshot-attributes.files-used | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_io_queued

    -

    Number of I/Os queued to the disk but not yet issued. aggr_disk_io_queued is disk_io_queued aggregated by aggr.

    +

    aggr_snapshot_inode_used_percent

    +

    The percentage of disk space currently in use based on user-visible file (inode) count on the referenced file system.

    + + + + + + + + + + + + + + + + + +
    APIEndpointMetricTemplate
    ZAPIaggr-get-iteraggr-attributes.aggr-snapshot-attributes.percent-inode-used-capacityconf/zapi/cdot/9.8.0/aggr.yaml
    +

    aggr_snapshot_maxfiles_available

    +

    Maximum files available for Snapshot copies

API | Endpoint | Metric | Template
REST | api/storage/aggregates | snapshot.max_files_available | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-snapshot-attributes.maxfiles-available | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_max_busy

    -

    The utilization percent of the disk. aggr_disk_max_busy is the maximum of disk_busy for label aggr.

    +

    aggr_snapshot_maxfiles_possible

    +

    The largest value to which the maxfiles-available parameter can be increased by reconfiguration, on the referenced file system.

API | Endpoint | Metric | Template
REST | api/storage/aggregates | snapshot.max_files_available, snapshot.max_files_used | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-snapshot-attributes.maxfiles-possible | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_max_capacity

    -

    Disk capacity in MB. aggr_disk_max_capacity is the maximum of disk_capacity for label aggr.

    +

    aggr_snapshot_maxfiles_used

    +

    Files in use by Snapshot copies

API | Endpoint | Metric | Template
REST | api/storage/aggregates | snapshot.max_files_used | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-snapshot-attributes.maxfiles-used | conf/zapi/cdot/9.8.0/aggr.yaml
    +

    aggr_snapshot_reserve_percent

    +

    Percentage of space reserved for Snapshot copies

    + + + + + + + + + + + + + + + + + + + + + + + +
    APIEndpointMetricTemplate
    RESTapi/storage/aggregatesspace.snapshot.reserve_percentconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-snapshot-attributes.snapshot-reserve-percentconf/zapi/cdot/9.8.0/aggr.yaml
    +

    aggr_snapshot_size_available

    +

    Available space for Snapshot copies in bytes

    + + + + + + + + + + + + + + + + + + + + + + + +
    APIEndpointMetricTemplate
    RESTapi/storage/aggregatesspace.snapshot.availableconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-snapshot-attributes.size-availableconf/zapi/cdot/9.8.0/aggr.yaml
    +

    aggr_snapshot_size_total

    +

    Total space for Snapshot copies in bytes

    + + + + + + + + + + + + + + + + + + + + + + + +
    APIEndpointMetricTemplate
    RESTapi/storage/aggregatesspace.snapshot.totalconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-snapshot-attributes.size-totalconf/zapi/cdot/9.8.0/aggr.yaml
    +

    aggr_snapshot_size_used

    +

    Space used by Snapshot copies in bytes

    + + + + + + + + + + + + + + + + + + + + + + + +
    APIEndpointMetricTemplate
    RESTapi/storage/aggregatesspace.snapshot.usedconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-snapshot-attributes.size-usedconf/zapi/cdot/9.8.0/aggr.yaml
    +

    aggr_snapshot_used_percent

    +

    Percentage of disk space used by Snapshot copies

    + + + + + + + + + + + + + + + + + + + + + + + +
    APIEndpointMetricTemplate
    RESTapi/storage/aggregatesspace.snapshot.used_percentconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-snapshot-attributes.percent-used-capacityconf/zapi/cdot/9.8.0/aggr.yaml
    +

    aggr_space_available

    +

    Space available in bytes.

    + + + + + + + + + + + + + + + + + + + + + + + +
    APIEndpointMetricTemplate
    RESTapi/storage/aggregatesspace.block_storage.availableconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.size-availableconf/zapi/cdot/9.8.0/aggr.yaml
    +

    aggr_space_capacity_tier_used

    +

    Used space in bytes in the cloud store. Only applicable for aggregates with a cloud store tier.

    + + + + + + + + + + + + + + + + + + + + + + + +
    APIEndpointMetricTemplate
    RESTapi/storage/aggregatesspace.cloud_storage.usedconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.capacity-tier-usedconf/zapi/cdot/9.8.0/aggr.yaml
    +

    aggr_space_data_compacted_count

    +

    Amount of compacted data in bytes.

    + + + + + + + + + + + + + + + + + + + + + + + +
    APIEndpointMetricTemplate
    RESTapi/storage/aggregatesspace.block_storage.data_compacted_countconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.data-compacted-countconf/zapi/cdot/9.8.0/aggr.yaml
    +

    aggr_space_data_compaction_saved

    +

    Space saved in bytes by compacting the data.

    + + + + + + + + + + + + + + + + + + + + + + + +
    APIEndpointMetricTemplate
    RESTapi/storage/aggregatesspace.block_storage.data_compaction_space_savedconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.data-compaction-space-savedconf/zapi/cdot/9.8.0/aggr.yaml
    +

    aggr_space_data_compaction_saved_percent

    +

    Percentage saved by compacting the data.

    + + + + + + + + + + + + + + + + + + + + + + + +
    APIEndpointMetricTemplate
    RESTapi/storage/aggregatesspace.block_storage.data_compaction_space_saved_percentconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.data-compaction-space-saved-percentconf/zapi/cdot/9.8.0/aggr.yaml
    +

    aggr_space_performance_tier_inactive_user_data

    +

The size that is physically used in the block storage and has a cold temperature, in bytes. This property is only supported if the aggregate is either attached to a cloud store or can be attached to a cloud store. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either block_storage.inactive_user_data or **.

    + + + + + + + + + + + + + + + + + + + + + + + +
    APIEndpointMetricTemplate
    RESTapi/storage/aggregatesspace.block_storage.inactive_user_dataconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.performance-tier-inactive-user-dataconf/zapi/cdot/9.8.0/aggr.yaml
    +

    aggr_space_performance_tier_inactive_user_data_percent

    +

The percentage of inactive user data in the block storage. This property is only supported if the aggregate is either attached to a cloud store or can be attached to a cloud store. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either block_storage.inactive_user_data_percent or **.

    + + + + + + + + + + + + + + + + + + + + +
    APIEndpointMetricTemplate
    RESTapi/storage/aggregatesspace.block_storage.inactive_user_data_percentconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.performance-tier-inactive-user-data-percentconf/zapi/cdot/9.8.0/aggr.yaml
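Because the two inactive_user_data fields above are advanced properties, they are only returned when named explicitly in the fields query parameter. A small sketch of such a request, in the same spirit as the earlier example (hostname and credentials are placeholders; endpoint and field names are taken from the tables above):

```python
# Request the advanced inactive-user-data fields explicitly, as required.
import requests

resp = requests.get(
    "https://cluster.example.com/api/storage/aggregates",
    params={"fields": "name,space.block_storage.inactive_user_data,"
                      "space.block_storage.inactive_user_data_percent"},
    auth=("monitor", "password"),
    verify=False,
)
resp.raise_for_status()
for rec in resp.json().get("records", []):
    block = rec.get("space", {}).get("block_storage", {})
    print(rec.get("name"),
          block.get("inactive_user_data"),
          block.get("inactive_user_data_percent"))
```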
    -

    aggr_disk_max_cp_read_chain

    -

    Average number of blocks transferred in each consistency point read operation during a CP. aggr_disk_max_cp_read_chain is the maximum of disk_cp_read_chain for label aggr.

    +

    aggr_space_performance_tier_used

    +

A summation of volume footprints (including volume guarantees), in bytes. This includes all of the volume footprints in the block_storage tier and the cloud_storage tier. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

API | Endpoint | Metric | Template
REST | api/storage/aggregates | space.footprint | conf/rest/9.12.0/aggr.yaml
    -

    aggr_disk_max_cp_read_latency

    -

    Average latency per block in microseconds for consistency point read operations. aggr_disk_max_cp_read_latency is the maximum of disk_cp_read_latency for label aggr.

    +

    aggr_space_performance_tier_used_percent

    +

A summation of volume footprints inside the aggregate, as a percentage. A volume's footprint is the amount of space being used for the volume in the aggregate.

API | Endpoint | Metric | Template
REST | api/storage/aggregates | space.footprint_percent | conf/rest/9.12.0/aggr.yaml
    -

    aggr_disk_max_cp_reads

    -

    Number of disk read operations initiated each second for consistency point processing. aggr_disk_max_cp_reads is the maximum of disk_cp_reads for label aggr.

    +

    aggr_space_physical_used

    +

    Total physical used size of an aggregate in bytes.

API | Endpoint | Metric | Template
REST | api/storage/aggregates | space.block_storage.physical_used | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-space-attributes.physical-used | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_max_io_pending

    -

    Average number of I/Os issued to the disk for which we have not yet received the response. aggr_disk_max_io_pending is the maximum of disk_io_pending for label aggr.

    +

    aggr_space_physical_used_percent

    +

    Physical used percentage.

API | Endpoint | Metric | Template
REST | api/storage/aggregates | space.block_storage.physical_used_percent | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-space-attributes.physical-used-percent | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_max_io_queued

    -

    Number of I/Os queued to the disk but not yet issued. aggr_disk_max_io_queued is the maximum of disk_io_queued for label aggr.

    +

    aggr_space_reserved

    +

    The total disk space in bytes that is reserved on the referenced file system. The reserved space is already counted in the used space, so this element can be used to see what portion of the used space represents space reserved for future use.

API | Endpoint | Metric | Template
ZAPI | aggr-get-iter | aggr-attributes.aggr-space-attributes.total-reserved-space | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_max_total_data

    -

    Total throughput for user operations per second. aggr_disk_max_total_data is the maximum of disk_total_data for label aggr.

    +

    aggr_space_sis_saved

    +

    Amount of space saved in bytes by storage efficiency.

API | Endpoint | Metric | Template
REST | api/storage/aggregates | space.block_storage.volume_deduplication_space_saved | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-space-attributes.sis-space-saved | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_max_total_transfers

    -

    Total number of disk operations involving data transfer initiated per second. aggr_disk_max_total_transfers is the maximum of disk_total_transfers for label aggr.

    +

    aggr_space_sis_saved_percent

    +

    Percentage of space saved by storage efficiency.

API | Endpoint | Metric | Template
REST | api/storage/aggregates | space.block_storage.volume_deduplication_space_saved_percent | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-space-attributes.sis-space-saved-percent | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_max_user_read_blocks

    -

    Number of blocks transferred for user read operations per second. aggr_disk_max_user_read_blocks is the maximum of disk_user_read_blocks for label aggr.

    +

    aggr_space_sis_shared_count

    +

    Amount of shared bytes counted by storage efficiency.

API | Endpoint | Metric | Template
REST | api/storage/aggregates | space.block_storage.volume_deduplication_shared_count | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-space-attributes.sis-shared-count | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_max_user_read_chain

    -

    Average number of blocks transferred in each user read operation. aggr_disk_max_user_read_chain is the maximum of disk_user_read_chain for label aggr.

    +

    aggr_space_total

    +

    Total usable space in bytes, not including WAFL reserve and aggregate Snapshot copy reserve.

API | Endpoint | Metric | Template
REST | api/storage/aggregates | space.block_storage.size | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-space-attributes.size-total | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_max_user_read_latency

    -

    Average latency per block in microseconds for user read operations. aggr_disk_max_user_read_latency is the maximum of disk_user_read_latency for label aggr.

    +

    aggr_space_used

    +

    Space used or reserved in bytes. Includes volume guarantees and aggregate metadata.

API | Endpoint | Metric | Template
REST | api/storage/aggregates | space.block_storage.used | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-space-attributes.size-used | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_max_user_reads

    -

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. aggr_disk_max_user_reads is the maximum of disk_user_reads for label aggr.

    +

    aggr_space_used_percent

    +

    The percentage of disk space currently in use on the referenced file system

API | Endpoint | Metric | Template
REST | api/storage/aggregates | space.block_storage.used, space.block_storage.size | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-space-attributes.percent-used-capacity | conf/zapi/cdot/9.8.0/aggr.yaml
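As with the efficiency metrics earlier, the REST row lists two source fields (used and size), which suggests the percentage is computed from their ratio, while the ZAPI side reads percent-used-capacity directly. A hedged sketch of that ratio, with invented numbers:

```python
# Illustrative only: percent used derived from the two REST fields above.
def used_percent(used_bytes: int, size_bytes: int) -> float:
    return 0.0 if size_bytes == 0 else 100.0 * used_bytes / size_bytes

print(round(used_percent(750 * 10**9, 1000 * 10**9), 1))  # 75.0
```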
    -

    aggr_disk_max_user_write_blocks

    -

    Number of blocks transferred for user write operations per second. aggr_disk_max_user_write_blocks is the maximum of disk_user_write_blocks for label aggr.

    +

    aggr_total_logical_used

    +

    Logical used

API | Endpoint | Metric | Template
REST | api/storage/aggregates | space.efficiency.logical_used | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-efficiency-get-iter | aggr-efficiency-info.aggr-efficiency-cumulative-info.total-logical-used | conf/zapi/cdot/9.9.0/aggr_efficiency.yaml
    -

    aggr_disk_max_user_write_chain

    -

    Average number of blocks transferred in each user write operation. aggr_disk_max_user_write_chain is the maximum of disk_user_write_chain for label aggr.

    +

    aggr_total_physical_used

    +

    Total Physical Used

API | Endpoint | Metric | Template
REST | api/storage/aggregates | space.efficiency.logical_used, space.efficiency.savings | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-efficiency-get-iter | aggr-efficiency-info.aggr-efficiency-cumulative-info.total-physical-used | conf/zapi/cdot/9.9.0/aggr_efficiency.yaml
    -

    aggr_disk_max_user_write_latency

    -

    Average latency per block in microseconds for user write operations. aggr_disk_max_user_write_latency is the maximum of disk_user_write_latency for label aggr.

    +

    aggr_volume_count

    +

    The aggregate's volume count, which includes both FlexVols and FlexGroup constituents.

API | Endpoint | Metric | Template
REST | api/storage/aggregates | volume_count | conf/rest/9.12.0/aggr.yaml
ZAPI | aggr-get-iter | aggr-attributes.aggr-volume-count-attributes.flexvol-count | conf/zapi/cdot/9.8.0/aggr.yaml
    -

    aggr_disk_max_user_writes

    -

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. aggr_disk_max_user_writes is the maximum of disk_user_writes for label aggr.

    +

    cifs_session_connection_count

    +

    A counter used to track requests that are sent to the volumes to the node.

API | Endpoint | Metric | Template
REST | api/protocols/cifs/sessions | connection_count | conf/rest/9.8.0/cifs_session.yaml
ZAPI | cifs-session-get-iter | cifs-session.connection-count | conf/zapi/cdot/9.8.0/cifs_session.yaml
    -

    aggr_disk_total_data

    -

    Total throughput for user operations per second. aggr_disk_total_data is disk_total_data aggregated by aggr.

    +

    cloud_target_used

    +

    The amount of cloud space used by all the aggregates attached to the target, in bytes. This field is only populated for FabricPool targets. The value is recalculated once every 5 minutes.

API | Endpoint | Metric | Template
REST | api/cloud/targets | used | conf/rest/9.12.0/cloud_target.yaml
ZAPI | aggr-object-store-config-get-iter | aggr-object-store-config-info.used-space | conf/zapi/cdot/9.10.0/aggr_object_store_config.yaml
    -

    aggr_disk_total_transfers

    -

    Total number of disk operations involving data transfer initiated per second. aggr_disk_total_transfers is disk_total_transfers aggregated by aggr.

    +

    cluster_new_status

    +

    It is an indicator of the overall health status of the cluster, with a value of 1 indicating a healthy status and a value of 0 indicating an unhealthy status.

API | Endpoint | Metric | Template
REST | NA | Harvest generated | conf/rest/9.12.0/status.yaml
ZAPI | NA | Harvest generated | conf/zapi/cdot/9.8.0/status.yaml
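Since this status metric is generated by Harvest rather than read from a single ONTAP counter, the useful detail is the encoding: 1 means healthy, 0 means unhealthy. A trivial sketch of that convention (the boolean input is a stand-in, not a real Harvest or ONTAP call):

```python
# Encoding used by cluster_new_status: 1 = healthy, 0 = unhealthy.
def cluster_new_status(is_healthy: bool) -> int:
    return 1 if is_healthy else 0

print(cluster_new_status(True), cluster_new_status(False))  # 1 0
```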
    -

    aggr_disk_user_read_blocks

    -

    Number of blocks transferred for user read operations per second. aggr_disk_user_read_blocks is disk_user_read_blocks aggregated by aggr.

    +

    cluster_subsystem_outstanding_alerts

    +

    Number of outstanding alerts

API | Endpoint | Metric | Template
REST | api/private/cli/system/health/subsystem | outstanding_alert_count | conf/rest/9.12.0/subsystem.yaml
ZAPI | diagnosis-subsystem-config-get-iter | diagnosis-subsystem-config-info.outstanding-alert-count | conf/zapi/cdot/9.8.0/subsystem.yaml
    -

    aggr_disk_user_read_chain

    -

    Average number of blocks transferred in each user read operation. aggr_disk_user_read_chain is disk_user_read_chain aggregated by aggr.

    +

    cluster_subsystem_suppressed_alerts

    +

    Number of suppressed alerts

API | Endpoint | Metric | Template
REST | api/private/cli/system/health/subsystem | suppressed_alert_count | conf/rest/9.12.0/subsystem.yaml
ZAPI | diagnosis-subsystem-config-get-iter | diagnosis-subsystem-config-info.suppressed-alert-count | conf/zapi/cdot/9.8.0/subsystem.yaml
    -

    aggr_disk_user_read_latency

    -

    Average latency per block in microseconds for user read operations. aggr_disk_user_read_latency is disk_user_read_latency aggregated by aggr.

    +

    copy_manager_bce_copy_count_curr

    +

    Current number of copy requests being processed by the Block Copy Engine.

API | Endpoint | Metric | Template
REST | api/cluster/counter/tables/copy_manager | block_copy_engine_current_copy_count (Unit: none, Type: delta) | conf/restperf/9.12.0/copy_manager.yaml
ZAPI | perf-object-get-instances copy_manager | bce_copy_count_curr (Unit: none, Type: delta) | conf/zapiperf/cdot/9.8.0/copy_manager.yaml
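The copy_manager counters above are typed as delta, i.e. the published value reflects the change between two successive polls of the raw counter rather than the raw reading itself. A hedged sketch of that idea with made-up samples:

```python
# Illustrative only: a "Type: delta" value is the difference between
# consecutive raw readings of the counter.
raw_samples = [10, 14, 14, 21]  # pretend readings, one per poll
deltas = [b - a for a, b in zip(raw_samples, raw_samples[1:])]
print(deltas)  # [4, 0, 7]
```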
    -

    aggr_disk_user_reads

    -

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. aggr_disk_user_reads is disk_user_reads aggregated by aggr.

    +

    copy_manager_kb_copied

    +

    Sum of kilo-bytes copied.

API | Endpoint | Metric | Template
REST | api/cluster/counter/tables/copy_manager | KB_copied (Unit: none, Type: delta) | conf/restperf/9.12.0/copy_manager.yaml
ZAPI | perf-object-get-instances copy_manager | KB_copied (Unit: none, Type: delta) | conf/zapiperf/cdot/9.8.0/copy_manager.yaml
    -

    aggr_disk_user_write_blocks

    -

    Number of blocks transferred for user write operations per second. aggr_disk_user_write_blocks is disk_user_write_blocks aggregated by aggr.

    +

    copy_manager_ocs_copy_count_curr

    +

    Current number of copy requests being processed by the ONTAP copy subsystem.

API | Endpoint | Metric | Template
REST | api/cluster/counter/tables/copy_manager | ontap_copy_subsystem_current_copy_count (Unit: none, Type: delta) | conf/restperf/9.12.0/copy_manager.yaml
ZAPI | perf-object-get-instances copy_manager | ocs_copy_count_curr (Unit: none, Type: delta) | conf/zapiperf/cdot/9.8.0/copy_manager.yaml
    -

    aggr_disk_user_write_chain

    -

    Average number of blocks transferred in each user write operation. aggr_disk_user_write_chain is disk_user_write_chain aggregated by aggr.

    +

    copy_manager_sce_copy_count_curr

    +

    Current number of copy requests being processed by the System Continuous Engineering.

API | Endpoint | Metric | Template
REST | api/cluster/counter/tables/copy_manager | system_continuous_engineering_current_copy_count (Unit: none, Type: delta) | conf/restperf/9.12.0/copy_manager.yaml
ZAPI | perf-object-get-instances copy_manager | sce_copy_count_curr (Unit: none, Type: delta) | conf/zapiperf/cdot/9.8.0/copy_manager.yaml
    -

    aggr_disk_user_write_latency

    -

    Average latency per block in microseconds for user write operations. aggr_disk_user_write_latency is disk_user_write_latency aggregated by aggr.

    +

    copy_manager_spince_copy_count_curr

    +

    Current number of copy requests being processed by the SpinCE.

API | Endpoint | Metric | Template
REST | api/cluster/counter/tables/copy_manager | spince_current_copy_count (Unit: none, Type: delta) | conf/restperf/9.12.0/copy_manager.yaml
ZAPI | perf-object-get-instances copy_manager | spince_copy_count_curr (Unit: none, Type: delta) | conf/zapiperf/cdot/9.8.0/copy_manager.yaml
    -

    aggr_disk_user_writes

    -

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. aggr_disk_user_writes is disk_user_writes aggregated by aggr.

    +

    disk_busy

    +

    The utilization percent of the disk

API | Endpoint | Metric | Template
REST | api/cluster/counter/tables/disk:constituent | disk_busy_percent (Unit: percent, Type: percent, Base: base_for_disk_busy) | conf/restperf/9.12.0/disk.yaml
ZAPI | perf-object-get-instances disk:constituent | disk_busy (Unit: percent, Type: percent, Base: base_for_disk_busy) | conf/zapiperf/cdot/9.8.0/disk.yaml
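disk_busy is typed percent with base_for_disk_busy as its base counter, which is the usual pattern for ONTAP percent counters: the change in the busy counter is divided by the change in its base over the same interval and scaled to 100. A hedged sketch of that calculation (sample numbers are invented):

```python
# Illustrative only: percent-type counter = 100 * delta(counter) / delta(base).
def percent_counter(delta_counter: float, delta_base: float) -> float:
    return 0.0 if delta_base == 0 else 100.0 * delta_counter / delta_base

print(percent_counter(450.0, 1000.0))  # 45.0
```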
    -

    aggr_efficiency_savings

    -

    Space saved by storage efficiencies (logical_used - used)

    +

    disk_bytes_per_sector

    +

    Bytes per sector.

    @@ -25824,14 +29830,20 @@

    aggr_efficiency_savings

    - - - + + + + + + + + +
    RESTapi/storage/aggregatesspace.efficiency.savingsconf/rest/9.12.0/aggr.yamlapi/storage/disksbytes_per_sectorconf/rest/9.12.0/disk.yaml
    ZAPIstorage-disk-get-iterstorage-disk-info.disk-inventory-info.bytes-per-sectorconf/zapi/cdot/9.8.0/disk.yaml
    -

    aggr_efficiency_savings_wo_snapshots

    -

    Space saved by storage efficiencies (logical_used - used)

    +

    disk_capacity

    +

    Disk capacity in MB

    @@ -25844,14 +29856,20 @@

    aggr_efficiency_savings_wo_snapsho

    - - - + + + + + + + + +
    RESTapi/storage/aggregatesspace.efficiency_without_snapshots.savingsconf/rest/9.12.0/aggr.yamlapi/cluster/counter/tables/disk:constituentcapacity
    Unit: mb
    Type: raw
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances disk:constituentdisk_capacity
    Unit: mb
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_efficiency_savings_wo_snapshots_flexclones

    -

    Space saved by storage efficiencies (logical_used - used)

    +

    disk_cp_read_chain

    +

    Average number of blocks transferred in each consistency point read operation during a CP

    @@ -25864,14 +29882,20 @@

    aggr_efficiency_savings

    - - - + + + + + + + + +
    RESTapi/storage/aggregatesspace.efficiency_without_snapshots_flexclones.savingsconf/rest/9.12.0/aggr.yamlapi/cluster/counter/tables/disk:constituentcp_read_chain
    Unit: none
    Type: average
    Base: cp_read_count
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances disk:constituentcp_read_chain
    Unit: none
    Type: average
    Base: cp_reads
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_hybrid_cache_size_total

    -

    Total usable space in bytes of SSD cache. Only provided when hybrid_cache.enabled is 'true'.

    +

    disk_cp_read_latency

    +

    Average latency per block in microseconds for consistency point read operations

    @@ -25884,20 +29908,20 @@

    aggr_hybrid_cache_size_totalUnit: microsec
    Type: average
    Base: cp_read_blocks +

    - - - + + +
    conf/restperf/9.12.0/disk.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.hybrid-cache-size-totalconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances disk:constituentcp_read_latency
    Unit: microsec
    Type: average
    Base: cp_read_blocks
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_hybrid_disk_count

    -

    Number of disks used in the cache tier of the aggregate. Only provided when hybrid_cache.enabled is 'true'.

    +

    disk_cp_reads

    +

    Number of disk read operations initiated each second for consistency point processing

    @@ -25910,14 +29934,20 @@

    aggr_hybrid_disk_count

    - - - + + + + + + + + +
    RESTapi/storage/aggregatesblock_storage.hybrid_cache.disk_countconf/rest/9.12.0/aggr.yamlapi/cluster/counter/tables/disk:constituentcp_read_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances disk:constituentcp_reads
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_inode_files_private_used

    -

Number of system metadata files used. If the referenced file system is restricted or offline, a value of 0 is returned. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

    +

    disk_io_pending

    +

    Average number of I/Os issued to the disk for which we have not yet received the response

    @@ -25930,20 +29960,20 @@

    aggr_inode_files_private_usedUnit: none
    Type: average
    Base: base_for_disk_busy +

    - - - + + +
    conf/restperf/9.12.0/disk.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-inode-attributes.files-private-usedconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances disk:constituentio_pending
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_inode_files_total

    -

    Maximum number of user-visible files that this referenced file system can currently hold. If the referenced file system is restricted or offline, a value of 0 is returned.

    +

    disk_io_queued

    +

    Number of I/Os queued to the disk but not yet issued

    @@ -25956,20 +29986,20 @@

    aggr_inode_files_total

    - - - + + + - - - + + +
    RESTapi/storage/aggregatesinode_attributes.files_totalconf/rest/9.12.0/aggr.yamlapi/cluster/counter/tables/disk:constituentio_queued
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/restperf/9.12.0/disk.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-inode-attributes.files-totalconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances disk:constituentio_queued
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_inode_files_used

    -

    Number of user-visible files used in the referenced file system. If the referenced file system is restricted or offline, a value of 0 is returned.

    +

    disk_power_on_hours

    +

    Hours powered on.

    @@ -25982,20 +30012,14 @@

    aggr_inode_files_usedaggr_inode_inodefile_private_capacity

    -

Number of files that can currently be stored on disk for system metadata files. This number will dynamically increase as more system files are created. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

    +

    disk_sectors

    +

    Number of sectors on the disk.

    @@ -26008,20 +30032,20 @@

    aggr_inode_inodefile_private_capa

    - - - + + + - - - + + +
    RESTapi/storage/aggregatesinode_attributes.file_private_capacityconf/rest/9.12.0/aggr.yamlapi/storage/diskssector_countconf/rest/9.12.0/disk.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-inode-attributes.inodefile-private-capacityconf/zapi/cdot/9.8.0/aggr.yamlstorage-disk-get-iterstorage-disk-info.disk-inventory-info.capacity-sectorsconf/zapi/cdot/9.8.0/disk.yaml
    -

    aggr_inode_inodefile_public_capacity

    -

Number of files that can currently be stored on disk for user-visible files. This number will dynamically increase as more user-visible files are created. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

    +

    disk_stats_average_latency

    +

    Average I/O latency across all active paths, in milliseconds.

    @@ -26034,20 +30058,20 @@

    aggr_inode_inodefile_public_capaci

    - - - + + + - - - + + +
    RESTapi/storage/aggregatesinode_attributes.file_public_capacityconf/rest/9.12.0/aggr.yamlapi/storage/disksstats.average_latencyconf/rest/9.12.0/disk.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-inode-attributes.inodefile-public-capacityconf/zapi/cdot/9.8.0/aggr.yamlstorage-disk-get-iterstorage-disk-info.disk-stats-info.average-latencyconf/zapi/cdot/9.8.0/disk.yaml
    -

    aggr_inode_maxfiles_available

    -

    The count of the maximum number of user-visible files currently allowable on the referenced file system.

    +

    disk_stats_io_kbps

    +

    Total Disk Throughput in KBPS Across All Active Paths

    @@ -26060,20 +30084,20 @@

    aggr_inode_maxfiles_availableaggr_inode_maxfiles_possible

    -

    The largest value to which the maxfiles-available parameter can be increased by reconfiguration, on the referenced file system.

    +

    disk_stats_sectors_read

    +

    Number of Sectors Read

    @@ -26086,20 +30110,20 @@

    aggr_inode_maxfiles_possibleaggr_inode_maxfiles_used

    -

    The number of user-visible files currently in use on the referenced file system.

    +

    disk_stats_sectors_written

    +

    Number of Sectors Written

    @@ -26112,20 +30136,20 @@

    aggr_inode_maxfiles_used

    - - - + + + - - - + + +
    RESTapi/storage/aggregatesinode_attributes.max_files_usedconf/rest/9.12.0/aggr.yamlapi/private/cli/disksectors_writtenconf/rest/9.12.0/disk.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-inode-attributes.maxfiles-usedconf/zapi/cdot/9.8.0/aggr.yamlstorage-disk-get-iterstorage-disk-info.disk-stats-info.sectors-writtenconf/zapi/cdot/9.8.0/disk.yaml
    -

    aggr_inode_used_percent

    -

    The percentage of disk space currently in use based on user-visible file count on the referenced file system.

    +

    disk_total_data

    +

    Total throughput for user operations per second

    @@ -26138,20 +30162,20 @@

    aggr_inode_used_percent

    - - - + + + - - - + + +
    RESTapi/storage/aggregatesinode_attributes.used_percentconf/rest/9.12.0/aggr.yamlapi/cluster/counter/tables/disk:constituenttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-inode-attributes.percent-inode-used-capacityconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances disk:constituenttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_logical_used_wo_snapshots

    -

    Logical used

    +

    disk_total_transfers

    +

    Total number of disk operations involving data transfer initiated per second

    @@ -26164,20 +30188,20 @@

    aggr_logical_used_wo_snapshotsUnit: per_sec
    Type: rate
    Base: +

    - - - + + +
    conf/restperf/9.12.0/disk.yaml
    ZAPIaggr-efficiency-get-iteraggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-logical-used-wo-snapshotsconf/zapi/cdot/9.9.0/aggr_efficiency.yamlperf-object-get-instances disk:constituenttotal_transfers
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_logical_used_wo_snapshots_flexclones

    -

    Logical used

    +

    disk_uptime

    +

    Number of seconds the drive has been powered on

    @@ -26190,20 +30214,20 @@

    aggr_logical_used_wo_snapshot

    - - - + + + - - - + + +
    RESTapi/storage/aggregatesspace.efficiency_without_snapshots_flexclones.logical_usedconf/rest/9.12.0/aggr.yamlapi/storage/disksstats.power_on_hours, 60, 60conf/rest/9.12.0/disk.yaml
    ZAPIaggr-efficiency-get-iteraggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-logical-used-wo-snapshots-flexclonesconf/zapi/cdot/9.9.0/aggr_efficiency.yamlstorage-disk-get-iterstorage-disk-info.disk-stats-info.power-on-time-intervalconf/zapi/cdot/9.8.0/disk.yaml
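The REST source for disk_uptime is stats.power_on_hours with two literal factors of 60 listed alongside it, which suggests the hour counter is converted to seconds by multiplying through. A hedged sketch of that conversion:

```python
# Illustrative only: hours reported by the disk, converted to seconds.
def disk_uptime_seconds(power_on_hours: int) -> int:
    return power_on_hours * 60 * 60

print(disk_uptime_seconds(24))  # 86400
```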
    -

    aggr_object_store_logical_used

    -

    Logical space usage of aggregates in the attached object store.

    +

    disk_usable_size

    +

    Usable size of each disk, in bytes.

    @@ -26216,14 +30240,14 @@

    aggr_object_store_logical_usedaggr_object_store_physical_used

    -

    Physical space usage of aggregates in the attached object store.

    +

    disk_user_read_blocks

    +

    Number of blocks transferred for user read operations per second

    @@ -26236,14 +30260,20 @@

    aggr_object_store_physical_used

    - - - + + + + + + + + +
    RESTapi/private/cli/aggr/show-spaceobject_store_physical_usedconf/rest/9.12.0/aggr.yamlapi/cluster/counter/tables/disk:constituentuser_read_block_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances disk:constituentuser_read_blocks
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_physical_used_wo_snapshots

    -

    Total Data Reduction Physical Used Without Snapshots

    +

    disk_user_read_chain

    +

    Average number of blocks transferred in each user read operation

    @@ -26256,20 +30286,20 @@

    aggr_physical_used_wo_snapshots

    - - - + + + - - - + + +
    RESTapi/storage/aggregatesspace.efficiency_without_snapshots.logical_used, space.efficiency_without_snapshots.savingsconf/rest/9.12.0/aggr.yamlapi/cluster/counter/tables/disk:constituentuser_read_chain
    Unit: none
    Type: average
    Base: user_read_count
    conf/restperf/9.12.0/disk.yaml
    ZAPIaggr-efficiency-get-iteraggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-physical-used-wo-snapshotsconf/zapi/cdot/9.9.0/aggr_efficiency.yamlperf-object-get-instances disk:constituentuser_read_chain
    Unit: none
    Type: average
    Base: user_reads
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_physical_used_wo_snapshots_flexclones

    -

    Total Data Reduction Physical Used without snapshots and flexclones

    +

    disk_user_read_latency

    +

    Average latency per block in microseconds for user read operations

    @@ -26282,20 +30312,20 @@

    aggr_physical_used_wo_snapsh

    - - - + + + - - - + + +
    RESTapi/storage/aggregatesspace.efficiency_without_snapshots_flexclones.logical_used, space.efficiency_without_snapshots_flexclones.savingsconf/rest/9.12.0/aggr.yamlapi/cluster/counter/tables/disk:constituentuser_read_latency
    Unit: microsec
    Type: average
    Base: user_read_block_count
    conf/restperf/9.12.0/disk.yaml
    ZAPIaggr-efficiency-get-iteraggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-physical-used-wo-snapshots-flexclonesconf/zapi/cdot/9.9.0/aggr_efficiency.yamlperf-object-get-instances disk:constituentuser_read_latency
    Unit: microsec
    Type: average
    Base: user_read_blocks
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_power

    -

    Power consumed by aggregate in Watts.

    +

    disk_user_reads

    +

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests

    @@ -26308,20 +30338,20 @@

    aggr_powerUnit:
    Type:
    Base: +

    + - - + +
    api/cluster/counter/tables/disk:constituentuser_read_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPINAHarvest generated
    Unit:
    Type:
    Base:
    perf-object-get-instances disk:constituentuser_reads
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_primary_disk_count

    -

    Number of disks used in the aggregate. This includes parity disks, but excludes disks in the hybrid cache.

    +

    disk_user_write_blocks

    +

    Number of blocks transferred for user write operations per second

    @@ -26334,14 +30364,20 @@

    aggr_primary_disk_count

    - - - + + + + + + + + +
    RESTapi/storage/aggregatesblock_storage.primary.disk_countconf/rest/9.12.0/aggr.yamlapi/cluster/counter/tables/disk:constituentuser_write_block_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances disk:constituentuser_write_blocks
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_raid_disk_count

    -

    Number of disks in the aggregate.

    +

    disk_user_write_chain

    +

    Average number of blocks transferred in each user write operation

    @@ -26354,20 +30390,20 @@

    aggr_raid_disk_countUnit: none
    Type: average
    Base: user_write_count +

    - - - + + +
    conf/restperf/9.12.0/disk.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-raid-attributes.disk-countconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances disk:constituentuser_write_chain
    Unit: none
    Type: average
    Base: user_writes
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_raid_plex_count

    -

    Number of plexes in the aggregate

    +

    disk_user_write_latency

    +

    Average latency per block in microseconds for user write operations

    @@ -26380,20 +30416,20 @@

    aggr_raid_plex_countUnit: microsec
    Type: average
    Base: user_write_block_count +

    - - - + + +
    conf/restperf/9.12.0/disk.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-raid-attributes.plex-countconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances disk:constituentuser_write_latency
    Unit: microsec
    Type: average
    Base: user_write_blocks
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_raid_size

    -

    Option to specify the maximum number of disks that can be included in a RAID group.

    +

    disk_user_writes

    +

    Number of disk write operations initiated each second for storing data or metadata associated with user requests

    @@ -26406,20 +30442,20 @@

    aggr_raid_sizeUnit: per_sec
    Type: rate
    Base: +

    - - - + + +
    conf/restperf/9.12.0/disk.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-raid-attributes.raid-sizeconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances disk:constituentuser_writes
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    aggr_snapshot_files_total

    -

    Total files allowed in Snapshot copies

    +

    environment_sensor_average_ambient_temperature

    +

    Average temperature of all ambient sensors for node in Celsius.

    @@ -26432,20 +30468,20 @@

    aggr_snapshot_files_totalaggr_snapshot_files_used

    -

    Total files created in Snapshot copies

    +

    environment_sensor_average_fan_speed

    +

    Average fan speed for node in rpm.

    @@ -26458,20 +30494,20 @@

    aggr_snapshot_files_used

    - - - + + + - - - + + +
    RESTapi/storage/aggregatessnapshot.files_usedconf/rest/9.12.0/aggr.yamlNAHarvest generatedconf/rest/9.12.0/sensor.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-snapshot-attributes.files-usedconf/zapi/cdot/9.8.0/aggr.yamlNAHarvest generatedconf/zapi/cdot/9.8.0/sensor.yaml
    -

    aggr_snapshot_inode_used_percent

    -

    The percentage of disk space currently in use based on user-visible file (inode) count on the referenced file system.

    +

    environment_sensor_average_temperature

    +

    Average temperature of all non-ambient sensors for node in Celsius.

    @@ -26483,15 +30519,21 @@

    aggr_snapshot_inode_used_percent

    + + + + + + - - - + + +
    RESTNAHarvest generatedconf/rest/9.12.0/sensor.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-snapshot-attributes.percent-inode-used-capacityconf/zapi/cdot/9.8.0/aggr.yamlNAHarvest generatedconf/zapi/cdot/9.8.0/sensor.yaml
    -

    aggr_snapshot_maxfiles_available

    -

    Maximum files available for Snapshot copies

    +

    environment_sensor_max_fan_speed

    +

    Maximum fan speed for node in rpm.

    @@ -26504,20 +30546,20 @@

    aggr_snapshot_maxfiles_available

    - - - + + + - - - + + +
    RESTapi/storage/aggregatessnapshot.max_files_availableconf/rest/9.12.0/aggr.yamlNAHarvest generatedconf/rest/9.12.0/sensor.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-snapshot-attributes.maxfiles-availableconf/zapi/cdot/9.8.0/aggr.yamlNAHarvest generatedconf/zapi/cdot/9.8.0/sensor.yaml
    -

    aggr_snapshot_maxfiles_possible

    -

    The largest value to which the maxfiles-available parameter can be increased by reconfiguration, on the referenced file system.

    +

    environment_sensor_max_temperature

    +

    Maximum temperature of all non-ambient sensors for node in Celsius.

    @@ -26530,20 +30572,20 @@

    aggr_snapshot_maxfiles_possible

    - - - + + + - - - + + +
    RESTapi/storage/aggregatessnapshot.max_files_available, snapshot.max_files_usedconf/rest/9.12.0/aggr.yamlNAHarvest generatedconf/rest/9.12.0/sensor.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-snapshot-attributes.maxfiles-possibleconf/zapi/cdot/9.8.0/aggr.yamlNAHarvest generatedconf/zapi/cdot/9.8.0/sensor.yaml
    -

    aggr_snapshot_maxfiles_used

    -

    Files in use by Snapshot copies

    +

    environment_sensor_min_ambient_temperature

    +

    Minimum temperature of all ambient sensors for node in Celsius.

    @@ -26556,20 +30598,20 @@

    aggr_snapshot_maxfiles_usedaggr_snapshot_reserve_percent

    -

    Percentage of space reserved for Snapshot copies

    +

    environment_sensor_min_fan_speed

    +

    Minimum fan speed for node in rpm.

    @@ -26582,20 +30624,20 @@

aggr_snapshot_reserve_percent
aggr_snapshot_size_available

    -

    Available space for Snapshot copies in bytes

    +

    environment_sensor_min_temperature

    +

    Minimum temperature of all non-ambient sensors for node in Celsius.

    @@ -26608,20 +30650,20 @@

aggr_snapshot_size_available
aggr_snapshot_size_total

    -

    Total space for Snapshot copies in bytes

    +

    environment_sensor_power

    +

    Power consumed by a node in Watts.

    @@ -26634,20 +30676,20 @@

    aggr_snapshot_size_total

    - - - + + + - - - + + +
    RESTapi/storage/aggregatesspace.snapshot.totalconf/rest/9.12.0/aggr.yamlNAHarvest generatedconf/rest/9.12.0/sensor.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-snapshot-attributes.size-totalconf/zapi/cdot/9.8.0/aggr.yamlNAHarvest generatedconf/zapi/cdot/9.8.0/sensor.yaml
    -

    aggr_snapshot_size_used

    -

    Space used by Snapshot copies in bytes

    +

    environment_sensor_threshold_value

    +

    Provides the sensor reading.

    @@ -26660,20 +30702,20 @@

    aggr_snapshot_size_used

    - - - + + + - - - + + +
    RESTapi/storage/aggregatesspace.snapshot.usedconf/rest/9.12.0/aggr.yamlapi/cluster/sensorsvalueconf/rest/9.12.0/sensor.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-snapshot-attributes.size-usedconf/zapi/cdot/9.8.0/aggr.yamlenvironment-sensors-get-iterenvironment-sensors-info.threshold-sensor-valueconf/zapi/cdot/9.8.0/sensor.yaml
    -
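Per the rows above, environment_sensor_threshold_value is read from the value field of api/cluster/sensors (REST) or environment-sensors-get-iter (ZAPI). A minimal REST sketch follows; the cluster address, credentials, and use of the requests package are assumptions, and certificate verification is disabled only to keep the example short.

```python
# Hedged sketch: read sensor values from the REST endpoint listed above.
# Cluster address and credentials are placeholders.
import requests

CLUSTER = "https://cluster.example.com"  # hypothetical management LIF
AUTH = ("admin", "password")             # hypothetical credentials

resp = requests.get(
    f"{CLUSTER}/api/cluster/sensors",
    params={"fields": "name,value"},
    auth=AUTH,
    verify=False,  # example only; verify certificates in real deployments
)
resp.raise_for_status()
for sensor in resp.json().get("records", []):
    print(sensor.get("name"), sensor.get("value"))
```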

    aggr_snapshot_used_percent

    -

    Percentage of disk space used by Snapshot copies

    +

    external_service_op_num_not_found_responses

    +

    Number of 'Not Found' responses for calls to this operation.

    @@ -26685,21 +30727,15 @@

    aggr_snapshot_used_percentUnit: none
    Type: delta
    Base: +

    conf/zapiperf/cdot/9.8.0/external_service_operation.yaml
    -

    aggr_space_available

    -

    Space available in bytes.

    +

    external_service_op_num_request_failures

    +

    A cumulative count of all request failures.

    @@ -26711,21 +30747,15 @@

    aggr_space_availableUnit: none
    Type: delta
    Base: +

    conf/zapiperf/cdot/9.8.0/external_service_operation.yaml
    -

    aggr_space_capacity_tier_used

    -

    Used space in bytes in the cloud store. Only applicable for aggregates with a cloud store tier.

    +

    external_service_op_num_requests_sent

    +

    Number of requests sent to this service.

    @@ -26737,21 +30767,15 @@

    aggr_space_capacity_tier_usedUnit: none
    Type: delta
    Base: +

    conf/zapiperf/cdot/9.8.0/external_service_operation.yaml
    -

    aggr_space_data_compacted_count

    -

    Amount of compacted data in bytes.

    +

    external_service_op_num_responses_received

    +

    Number of responses received from the server (does not include timeouts).

    @@ -26763,47 +30787,35 @@

    aggr_space_data_compacted_count

    - - - - - - - - - + + +
    RESTapi/storage/aggregatesspace.block_storage.data_compacted_countconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.data-compacted-countconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances external_service_opnum_responses_received
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/external_service_operation.yaml
    -

    aggr_space_data_compaction_saved

    -

    Space saved in bytes by compacting the data.

    +

    external_service_op_num_successful_responses

    +

    Number of successful responses to this operation.

    - - - - - - - - - - + + + + - - - + + +
    API EndpointMetricTemplate
    RESTapi/storage/aggregatesspace.block_storage.data_compaction_space_savedconf/rest/9.12.0/aggr.yamlMetricTemplate
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.data-compaction-space-savedconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances external_service_opnum_successful_responses
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/external_service_operation.yaml
    -

    aggr_space_data_compaction_saved_percent

    -

    Percentage saved by compacting the data.

    +

    external_service_op_num_timeouts

    +

Number of times requests to the server for this operation timed out, meaning no response was received in a given time period.

    @@ -26815,21 +30827,15 @@

    aggr_space_data_compaction_sav

    - - - - - - - - - + + +
    RESTapi/storage/aggregatesspace.block_storage.data_compaction_space_saved_percentconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.data-compaction-space-saved-percentconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances external_service_opnum_timeouts
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/external_service_operation.yaml
    -

    aggr_space_performance_tier_inactive_user_data

    -

The size that is physically used in the block storage and has a cold temperature, in bytes. This property is only supported if the aggregate is either attached to a cloud store or can be attached to a cloud store. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either block_storage.inactive_user_data or **.

    +

    external_service_op_request_latency

    +

    Average latency of requests for operations of this type on this server.

    @@ -26841,21 +30847,15 @@

    aggr_space_performance_t

    - - - - - - - - - + + +
    RESTapi/storage/aggregatesspace.block_storage.inactive_user_dataconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.performance-tier-inactive-user-dataconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances external_service_oprequest_latency
    Unit: microsec
    Type: average
    Base: num_requests_sent
    conf/zapiperf/cdot/9.8.0/external_service_operation.yaml
    -

    aggr_space_performance_tier_inactive_user_data_percent

    -

The percentage of inactive user data in the block storage. This property is only supported if the aggregate is either attached to a cloud store or can be attached to a cloud store. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either block_storage.inactive_user_data_percent or **.

    +

    external_service_op_request_latency_hist

    +

    This histogram holds the latency values for requests of this operation to the specified server.

    @@ -26867,21 +30867,15 @@

    aggr_space_perfo

    - - - - - - - - - + + +
    RESTapi/storage/aggregatesspace.block_storage.inactive_user_data_percentconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.performance-tier-inactive-user-data-percentconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances external_service_oprequest_latency_hist
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/external_service_operation.yaml
    -
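external_service_op_request_latency_hist is a delta-type histogram: each bucket is a cumulative count, and the per-interval value is the difference between two consecutive polls, bucket by bucket. A minimal sketch of that subtraction is below; the bucket labels are made up for illustration.

```python
# Hedged sketch: per-interval view of a delta-type histogram counter such as
# external_service_op_request_latency_hist. Bucket labels are illustrative.
def histogram_delta(current, previous):
    """Both arguments map bucket label -> cumulative count."""
    return {bucket: current[bucket] - previous.get(bucket, 0) for bucket in current}

prev = {"<1ms": 100, "<10ms": 40, "<100ms": 5}
curr = {"<1ms": 160, "<10ms": 52, "<100ms": 6}
print(histogram_delta(curr, prev))  # {'<1ms': 60, '<10ms': 12, '<100ms': 1}
```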

    aggr_space_performance_tier_used

    -

A summation of volume footprints (including volume guarantees), in bytes. This includes all of the volume footprints in the block_storage tier and the cloud_storage tier. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

    +

    fabricpool_average_latency

    +

This counter is deprecated. Average latencies measured during various phases of command execution. The execution-start latency represents the average time taken to start executing an operation. The request-prepare latency represents the average time taken to prepare the complete request that needs to be sent to the server. The send latency represents the average time taken to send requests to the server. The execution-start-to-send-complete represents the average time taken to send an operation out since its execution started. The execution-start-to-first-byte-received represents the average time taken to receive the first byte of a response since the command's request execution started. These counters can be used to identify performance bottlenecks within the object store client module.

    @@ -26893,15 +30887,15 @@

    aggr_space_performance_tier_used

    - - - - + + + +
    RESTapi/storage/aggregatesspace.footprintconf/rest/9.12.0/aggr.yamlZAPIperf-object-get-instances object_store_client_opaverage_latency
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml
    -

    aggr_space_performance_tier_used_percent

    -

    A summation of volume footprints inside the aggregate,as a percentage. A volume's footprint is the amount of space being used for the volume in the aggregate.

    +

    fabricpool_cloud_bin_op_latency_average

    +

    Cloud bin operation latency average in milliseconds.

    @@ -26914,14 +30908,20 @@

    aggr_space_performance_tier_us

    - - - + + + + + + + + +
    RESTapi/storage/aggregatesspace.footprint_percentconf/rest/9.12.0/aggr.yamlapi/cluster/counter/tables/wafl_comp_aggr_vol_bincloud_bin_op_latency_average
    Unit: millisec
    Type: raw
    Base:
    conf/restperf/9.12.0/wafl_comp_aggr_vol_bin.yaml
    ZAPIperf-object-get-instances wafl_comp_aggr_vol_bincloud_bin_op_latency_average
    Unit: millisec
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_comp_aggr_vol_bin.yaml
    -

    aggr_space_physical_used

    -

    Total physical used size of an aggregate in bytes.

    +

    fabricpool_cloud_bin_operation

    +

    Cloud bin operation counters.

    @@ -26934,20 +30934,20 @@

    aggr_space_physical_used

    - - - + + + - - - + + +
    RESTapi/storage/aggregatesspace.block_storage.physical_usedconf/rest/9.12.0/aggr.yamlapi/cluster/counter/tables/wafl_comp_aggr_vol_bincloud_bin_op
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/wafl_comp_aggr_vol_bin.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.physical-usedconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances wafl_comp_aggr_vol_bincloud_bin_operation
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_comp_aggr_vol_bin.yaml
    -

    aggr_space_physical_used_percent

    -

    Physical used percentage.

    +

    fabricpool_get_throughput_bytes

    +

    This counter is deprecated. Counter that indicates the throughput for GET command in bytes per second.

    @@ -26959,21 +30959,15 @@

    aggr_space_physical_used_percent

    - - - - - - - - - + + +
    RESTapi/storage/aggregatesspace.block_storage.physical_used_percentconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.physical-used-percentconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances object_store_client_opget_throughput_bytes
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml
    -

    aggr_space_reserved

    -

    The total disk space in bytes that is reserved on the referenced file system. The reserved space is already counted in the used space, so this element can be used to see what portion of the used space represents space reserved for future use.

    +

    fabricpool_put_throughput_bytes

    +

    This counter is deprecated. Counter that indicates the throughput for PUT command in bytes per second.

    @@ -26986,14 +30980,14 @@

    aggr_space_reservedUnit:
    Type:
    Base: +

    conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml
    -

    aggr_space_sis_saved

    -

    Amount of space saved in bytes by storage efficiency.

    +

    fabricpool_stats

    +

This counter is deprecated. Counter that indicates the number of object store operations sent, and their success and failure counts. The objstore_client_op_name array indicates the operation name such as PUT, GET, etc. The objstore_client_op_stats_name array contains the total number of operations and their success and failure counters for each operation.

    @@ -27005,21 +30999,15 @@

    aggr_space_sis_savedUnit:
    Type:
    Base: +

    conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml
    -

    aggr_space_sis_saved_percent

    -

    Percentage of space saved by storage efficiency.

    +

    fabricpool_throughput_ops

    +

    Counter that indicates the throughput for commands in ops per second.

    @@ -27031,21 +31019,15 @@

    aggr_space_sis_saved_percentUnit: per_sec
    Type: rate,no-zero-values
    Base: +

    conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml
    -

    aggr_space_sis_shared_count

    -

    Amount of shared bytes counted by storage efficiency.

    +

    fcp_avg_other_latency

    +

    Average latency for operations other than read and write

    @@ -27058,20 +31040,20 @@

    aggr_space_sis_shared_countUnit: microsec
    Type: average
    Base: other_ops +

    - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.sis-shared-countconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances fcp_portavg_other_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    aggr_space_total

    -

    Total usable space in bytes, not including WAFL reserve and aggregate Snapshot copy reserve.

    +

    fcp_avg_read_latency

    +

    Average latency for read operations

    @@ -27084,20 +31066,20 @@

    aggr_space_totalUnit: microsec
    Type: average
    Base: read_ops +

    - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.size-totalconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances fcp_portavg_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -
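fcp_avg_read_latency is an average-type counter with read_ops as its base: both the latency counter and the base are cumulative, and the exported value is the ratio of their deltas over one poll interval. A minimal sketch of that reduction, with made-up sample values:

```python
# Hedged sketch: how an average-type counter (e.g. fcp_avg_read_latency,
# Unit: microsec, Base: read_ops) is reduced from two consecutive samples.
def average_counter(latency_now, latency_prev, base_now, base_prev):
    ops = base_now - base_prev
    if ops <= 0:
        return None  # no read operations in this interval
    return (latency_now - latency_prev) / ops  # microseconds per operation

print(average_counter(1_500_000, 1_200_000, 4_000, 3_400))  # 500.0
```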

    aggr_space_used

    -

    Space used or reserved in bytes. Includes volume guarantees and aggregate metadata.

    +

    fcp_avg_write_latency

    +

    Average latency for write operations

    @@ -27110,20 +31092,20 @@

    aggr_space_usedUnit: microsec
    Type: average
    Base: write_ops +

    - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.size-usedconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances fcp_portavg_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    aggr_space_used_percent

    -

    The percentage of disk space currently in use on the referenced file system

    +

    fcp_discarded_frames_count

    +

    Number of discarded frames.

    @@ -27136,20 +31118,20 @@

    aggr_space_used_percent

    - - - + + + - - - + + +
    RESTapi/storage/aggregatesspace.block_storage.used, space.block_storage.sizeconf/rest/9.12.0/aggr.yamlapi/cluster/counter/tables/fcpdiscarded_frames_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-space-attributes.percent-used-capacityconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances fcp_portdiscarded_frames_count
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -
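The REST row for aggr_space_used_percent lists two fields, space.block_storage.used and space.block_storage.size, which indicates the percentage is derived rather than read directly. A plausible reading, stated here as an assumption rather than the template's confirmed formula, is used divided by size, times 100:

```python
# Hedged sketch: deriving a used-percent figure from the two REST fields
# listed above. The exact formula applied by the template is an assumption.
def used_percent(used_bytes, size_bytes):
    if size_bytes == 0:
        return 0.0
    return 100.0 * used_bytes / size_bytes

print(used_percent(750 * 2**30, 1000 * 2**30))  # 75.0
```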

    aggr_total_logical_used

    -

    Logical used

    +

    fcp_fabric_connected_speed

    +

    The negotiated data rate between the target FC port and the fabric in gigabits per second.

    @@ -27162,20 +31144,14 @@

    aggr_total_logical_used

    - - - - - - - - - + + +
    RESTapi/storage/aggregatesspace.efficiency.logical_usedconf/rest/9.12.0/aggr.yaml
    ZAPIaggr-efficiency-get-iteraggr-efficiency-info.aggr-efficiency-cumulative-info.total-logical-usedconf/zapi/cdot/9.9.0/aggr_efficiency.yamlapi/network/fc/portsfabric.connected_speedconf/rest/9.6.0/fcp.yaml
    -

    aggr_total_physical_used

    -

    Total Physical Used

    +

    fcp_int_count

    +

    Number of interrupts

    @@ -27188,20 +31164,20 @@

    aggr_total_physical_used

    - - - + + + - - - + + +
    RESTapi/storage/aggregatesspace.efficiency.logical_used, space.efficiency.savingsconf/rest/9.12.0/aggr.yamlapi/cluster/counter/tables/fcpinterrupt_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIaggr-efficiency-get-iteraggr-efficiency-info.aggr-efficiency-cumulative-info.total-physical-usedconf/zapi/cdot/9.9.0/aggr_efficiency.yamlperf-object-get-instances fcp_portint_count
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    aggr_volume_count

    -

    The aggregate's volume count, which includes both FlexVols and FlexGroup constituents.

    +

    fcp_invalid_crc

    +

    Number of invalid cyclic redundancy checks (CRC count)

    @@ -27214,20 +31190,20 @@

    aggr_volume_countUnit: none
    Type: delta
    Base: +

    - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIaggr-get-iteraggr-attributes.aggr-volume-count-attributes.flexvol-countconf/zapi/cdot/9.8.0/aggr.yamlperf-object-get-instances fcp_portinvalid_crc
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    cifs_session_connection_count

    -

    A counter used to track requests that are sent to the volumes to the node.

    +

    fcp_invalid_transmission_word

    +

    Number of invalid transmission words

    @@ -27240,20 +31216,20 @@

    cifs_session_connection_countUnit: none
    Type: delta
    Base: +

    - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIcifs-session-get-itercifs-session.connection-countconf/zapi/cdot/9.8.0/cifs_session.yamlperf-object-get-instances fcp_portinvalid_transmission_word
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    cloud_target_used

    -

    The amount of cloud space used by all the aggregates attached to the target, in bytes. This field is only populated for FabricPool targets. The value is recalculated once every 5 minutes.

    +

    fcp_isr_count

    +

    Number of interrupt responses

    @@ -27266,20 +31242,20 @@

    cloud_target_usedUnit: none
    Type: delta
    Base: +

    - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIaggr-object-store-config-get-iteraggr-object-store-config-info.used-spaceconf/zapi/cdot/9.10.0/aggr_object_store_config.yamlperf-object-get-instances fcp_portisr_count
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    cluster_new_status

    -

    It is an indicator of the overall health status of the cluster, with a value of 1 indicating a healthy status and a value of 0 indicating an unhealthy status.

    +

    fcp_lif_avg_latency

    +

    Average latency for FCP operations

    @@ -27292,20 +31268,20 @@

    cluster_new_statusUnit: microsec
    Type: average
    Base: total_ops +

    - - - + + +
    conf/restperf/9.12.0/fcp_lif.yaml
    ZAPINAHarvest generatedconf/zapi/cdot/9.8.0/status.yamlperf-object-get-instances fcp_lifavg_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/zapiperf/cdot/9.8.0/fcp_lif.yaml
    -

    cluster_subsystem_outstanding_alerts

    -

    Number of outstanding alerts

    +

    fcp_lif_avg_other_latency

    +

    Average latency for operations other than read and write

    @@ -27318,20 +31294,20 @@

    cluster_subsystem_outstanding_aler

    - - - + + + - - - + + +
    RESTapi/private/cli/system/health/subsystemoutstanding_alert_countconf/rest/9.12.0/subsystem.yamlapi/cluster/counter/tables/fcp_lifaverage_other_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/restperf/9.12.0/fcp_lif.yaml
    ZAPIdiagnosis-subsystem-config-get-iterdiagnosis-subsystem-config-info.outstanding-alert-countconf/zapi/cdot/9.8.0/subsystem.yamlperf-object-get-instances fcp_lifavg_other_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/zapiperf/cdot/9.8.0/fcp_lif.yaml
    -

    cluster_subsystem_suppressed_alerts

    -

    Number of suppressed alerts

    +

    fcp_lif_avg_read_latency

    +

    Average latency for read operations

    @@ -27344,20 +31320,20 @@

    cluster_subsystem_suppressed_alerts

    - - - + + + - - - + + +
    RESTapi/private/cli/system/health/subsystemsuppressed_alert_countconf/rest/9.12.0/subsystem.yamlapi/cluster/counter/tables/fcp_lifaverage_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/restperf/9.12.0/fcp_lif.yaml
    ZAPIdiagnosis-subsystem-config-get-iterdiagnosis-subsystem-config-info.suppressed-alert-countconf/zapi/cdot/9.8.0/subsystem.yamlperf-object-get-instances fcp_lifavg_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/fcp_lif.yaml
    -

    copy_manager_bce_copy_count_curr

    -

    Current number of copy requests being processed by the Block Copy Engine.

    +

    fcp_lif_avg_write_latency

    +

    Average latency for write operations

    @@ -27370,20 +31346,20 @@

    copy_manager_bce_copy_count_curr

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/copy_managerblock_copy_engine_current_copy_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/copy_manager.yamlapi/cluster/counter/tables/fcp_lifaverage_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/restperf/9.12.0/fcp_lif.yaml
    ZAPIperf-object-get-instances copy_managerbce_copy_count_curr
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/copy_manager.yamlperf-object-get-instances fcp_lifavg_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/fcp_lif.yaml
    -

    copy_manager_kb_copied

    -

    Sum of kilo-bytes copied.

    +

    fcp_lif_other_ops

    +

    Number of operations that are not read or write.

    @@ -27396,20 +31372,20 @@

    copy_manager_kb_copied

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/copy_managerKB_copied
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/copy_manager.yamlapi/cluster/counter/tables/fcp_lifother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp_lif.yaml
    ZAPIperf-object-get-instances copy_managerKB_copied
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/copy_manager.yamlperf-object-get-instances fcp_lifother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp_lif.yaml
    -

    copy_manager_ocs_copy_count_curr

    -

    Current number of copy requests being processed by the ONTAP copy subsystem.

    +

    fcp_lif_read_data

    +

    Amount of data read from the storage system

    @@ -27422,20 +31398,20 @@

    copy_manager_ocs_copy_count_curr

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/copy_managerontap_copy_subsystem_current_copy_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/copy_manager.yamlapi/cluster/counter/tables/fcp_lifread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp_lif.yaml
    ZAPIperf-object-get-instances copy_managerocs_copy_count_curr
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/copy_manager.yamlperf-object-get-instances fcp_lifread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp_lif.yaml
    -

    copy_manager_sce_copy_count_curr

    -

    Current number of copy requests being processed by the System Continuous Engineering.

    +

    fcp_lif_read_ops

    +

    Number of read operations

    @@ -27448,20 +31424,20 @@

    copy_manager_sce_copy_count_curr

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/copy_managersystem_continuous_engineering_current_copy_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/copy_manager.yamlapi/cluster/counter/tables/fcp_lifread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp_lif.yaml
    ZAPIperf-object-get-instances copy_managersce_copy_count_curr
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/copy_manager.yamlperf-object-get-instances fcp_lifread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp_lif.yaml
    -

    copy_manager_spince_copy_count_curr

    -

    Current number of copy requests being processed by the SpinCE.

    +

    fcp_lif_total_ops

    +

    Total number of operations.

    @@ -27474,20 +31450,20 @@

    copy_manager_spince_copy_count_curr

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/copy_managerspince_current_copy_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/copy_manager.yamlapi/cluster/counter/tables/fcp_liftotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp_lif.yaml
    ZAPIperf-object-get-instances copy_managerspince_copy_count_curr
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/copy_manager.yamlperf-object-get-instances fcp_liftotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp_lif.yaml
    -

    disk_busy

    -

    The utilization percent of the disk

    +

    fcp_lif_write_data

    +

    Amount of data written to the storage system

    @@ -27500,20 +31476,20 @@

    disk_busyUnit: percent
    Type: percent
    Base: base_for_disk_busy -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcp_lifwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp_lif.yaml
    ZAPIperf-object-get-instances disk:constituentdisk_busy
    Unit: percent
    Type: percent
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_lifwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp_lif.yaml
    -

    disk_bytes_per_sector

    -

    Bytes per sector.

    +

    fcp_lif_write_ops

    +

    Number of write operations

    @@ -27526,20 +31502,20 @@

    disk_bytes_per_sectorUnit: per_sec
    Type: rate
    Base: +

    - - - + + +
    conf/restperf/9.12.0/fcp_lif.yaml
    ZAPIstorage-disk-get-iterstorage-disk-info.disk-inventory-info.bytes-per-sectorconf/zapi/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_lifwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp_lif.yaml
    -

    disk_capacity

    -

    Disk capacity in MB

    + +

    Number of times the Fibre Channel link was lost

    @@ -27552,20 +31528,20 @@

    disk_capacity

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/disk:constituentcapacity
    Unit: mb
    Type: raw
    Base:
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcplink.down
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituentdisk_capacity
    Unit: mb
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portlink_down
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    disk_cp_read_chain

    -

    Average number of blocks transferred in each consistency point read operation during a CP

    + +

    Number of link failures

    @@ -27578,20 +31554,20 @@

    disk_cp_read_chainUnit: none
    Type: average
    Base: cp_read_count -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcplink_failure
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituentcp_read_chain
    Unit: none
    Type: average
    Base: cp_reads
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portlink_failure
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    disk_cp_read_latency

    -

    Average latency per block in microseconds for consistency point read operations

    + +

    Number of times the Fibre Channel link was established

    @@ -27604,20 +31580,20 @@

    disk_cp_read_latencyUnit: microsec
    Type: average
    Base: cp_read_blocks -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcplink.up
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituentcp_read_latency
    Unit: microsec
    Type: average
    Base: cp_read_blocks
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portlink_up
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_cp_reads

    -

    Number of disk read operations initiated each second for consistency point processing

    +

    fcp_loss_of_signal

    +

    Number of times this port lost signal

    @@ -27630,20 +31606,20 @@

    disk_cp_reads

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/disk:constituentcp_read_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcploss_of_signal
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituentcp_reads
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portloss_of_signal
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    disk_io_pending

    -

    Average number of I/Os issued to the disk for which we have not yet received the response

    +

    fcp_loss_of_sync

    +

    Number of times this port lost sync

    @@ -27656,20 +31632,20 @@

    disk_io_pendingUnit: none
    Type: average
    Base: base_for_disk_busy -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcploss_of_sync
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituentio_pending
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portloss_of_sync
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    disk_io_queued

    -

    Number of I/Os queued to the disk but not yet issued

    +

    fcp_max_speed

    +

    The maximum speed supported by the FC port in gigabits per second.

    @@ -27682,20 +31658,14 @@

    disk_io_queuedUnit: none
    Type: average
    Base: base_for_disk_busy -

    - - - - - - + + +
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances disk:constituentio_queued
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yamlapi/network/fc/portsspeed.maximumconf/rest/9.6.0/fcp.yaml
    -
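fcp_max_speed and fcp_fabric_connected_speed both come from the api/network/fc/ports REST endpoint (fields speed.maximum and fabric.connected_speed, in gigabits per second). A minimal sketch of pulling the two fields together follows; the cluster address, credentials, and use of the requests package are assumptions.

```python
# Hedged sketch: list FC ports with their maximum and fabric-negotiated speeds,
# using the REST endpoint and fields named in the rows above.
import requests

CLUSTER = "https://cluster.example.com"  # hypothetical management LIF
AUTH = ("admin", "password")             # hypothetical credentials

resp = requests.get(
    f"{CLUSTER}/api/network/fc/ports",
    params={"fields": "name,speed.maximum,fabric.connected_speed"},
    auth=AUTH,
    verify=False,  # example only
)
resp.raise_for_status()
for port in resp.json().get("records", []):
    speed = port.get("speed", {})
    fabric = port.get("fabric", {})
    print(port.get("name"), speed.get("maximum"), fabric.get("connected_speed"))
```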

    disk_power_on_hours

    -

    Hours powered on.

    +

    fcp_nvmf_avg_other_latency

    +

    Average latency for operations other than read and write (FC-NVMe)

    @@ -27708,14 +31678,20 @@

    disk_power_on_hoursUnit: microsec
    Type: average
    Base: nvmf.other_ops +

    + + + + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_avg_other_latency
    Unit: microsec
    Type: average
    Base: nvmf_other_ops
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_sectors

    -

    Number of sectors on the disk.

    +

    fcp_nvmf_avg_read_latency

    +

    Average latency for read operations (FC-NVMe)

    @@ -27728,20 +31704,20 @@

    disk_sectors

    - - - + + + - - - + + +
    RESTapi/storage/diskssector_countconf/rest/9.12.0/disk.yamlapi/cluster/counter/tables/fcpnvmf.average_read_latency
    Unit: microsec
    Type: average
    Base: nvmf.read_ops
    conf/restperf/9.12.0/fcp.yaml
    ZAPIstorage-disk-get-iterstorage-disk-info.disk-inventory-info.capacity-sectorsconf/zapi/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_avg_read_latency
    Unit: microsec
    Type: average
    Base: nvmf_read_ops
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_stats_average_latency

    -

    Average I/O latency across all active paths, in milliseconds.

    +

    fcp_nvmf_avg_remote_other_latency

    +

    Average latency for remote operations other than read and write (FC-NVMe)

    @@ -27754,20 +31730,20 @@

    disk_stats_average_latencyUnit: microsec
    Type: average
    Base: nvmf_remote.other_ops +

    - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIstorage-disk-get-iterstorage-disk-info.disk-stats-info.average-latencyconf/zapi/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_avg_remote_other_latency
    Unit: microsec
    Type: average
    Base: nvmf_remote_other_ops
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_stats_io_kbps

    -

    Total Disk Throughput in KBPS Across All Active Paths

    +

    fcp_nvmf_avg_remote_read_latency

    +

    Average latency for remote read operations (FC-NVMe)

    @@ -27780,20 +31756,20 @@

    disk_stats_io_kbpsUnit: microsec
    Type: average
    Base: nvmf_remote.read_ops +

    - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIstorage-disk-get-iterstorage-disk-info.disk-stats-info.disk-io-kbpsconf/zapi/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_avg_remote_read_latency
    Unit: microsec
    Type: average
    Base: nvmf_remote_read_ops
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_stats_sectors_read

    -

    Number of Sectors Read

    +

    fcp_nvmf_avg_remote_write_latency

    +

    Average latency for remote write operations (FC-NVMe)

    @@ -27806,20 +31782,20 @@

    disk_stats_sectors_read

    - - - + + + - - - + + +
    RESTapi/private/cli/disksectors_readconf/rest/9.12.0/disk.yamlapi/cluster/counter/tables/fcpnvmf.average_remote_write_latency
    Unit: microsec
    Type: average
    Base: nvmf_remote.write_ops
    conf/restperf/9.12.0/fcp.yaml
    ZAPIstorage-disk-get-iterstorage-disk-info.disk-stats-info.sectors-readconf/zapi/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_avg_remote_write_latency
    Unit: microsec
    Type: average
    Base: nvmf_remote_write_ops
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_stats_sectors_written

    -

    Number of Sectors Written

    +

    fcp_nvmf_avg_write_latency

    +

    Average latency for write operations (FC-NVMe)

    @@ -27832,20 +31808,20 @@

    disk_stats_sectors_writtenUnit: microsec
    Type: average
    Base: nvmf.write_ops +

    - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIstorage-disk-get-iterstorage-disk-info.disk-stats-info.sectors-writtenconf/zapi/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_avg_write_latency
    Unit: microsec
    Type: average
    Base: nvmf_write_ops
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_total_data

    -

    Total throughput for user operations per second

    +

    fcp_nvmf_caw_data

    +

    Amount of CAW data sent to the storage system (FC-NVMe)

    @@ -27858,20 +31834,20 @@

    disk_total_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcpnvmf.caw_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituenttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_caw_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_total_transfers

    -

    Total number of disk operations involving data transfer initiated per second

    +

    fcp_nvmf_caw_ops

    +

    Number of FC-NVMe CAW operations

    @@ -27884,20 +31860,20 @@

    disk_total_transfersUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcpnvmf.caw_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituenttotal_transfers
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_caw_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_uptime

    -

    Number of seconds the drive has been powered on

    +

    fcp_nvmf_command_slots

    +

    Number of command slots that have been used by initiators logging into this port. This shows the command fan-in on the port.

    @@ -27910,20 +31886,20 @@

    disk_uptime

    - - - + + + - - - + + +
    RESTapi/storage/disksstats.power_on_hours, 60, 60conf/rest/9.12.0/disk.yamlapi/cluster/counter/tables/fcpnvmf.command_slots
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIstorage-disk-get-iterstorage-disk-info.disk-stats-info.power-on-time-intervalconf/zapi/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_command_slots
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -
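The REST row for disk_uptime lists stats.power_on_hours together with two factors of 60, implying the hours reported by api/storage/disks are scaled to the seconds exposed by this metric. A one-line sketch of that conversion (the scaling itself is what the "60, 60" in the endpoint column suggests):

```python
# Hedged sketch: convert the power-on hours reported by api/storage/disks
# (stats.power_on_hours) into the seconds exposed as disk_uptime.
def disk_uptime_seconds(power_on_hours):
    return power_on_hours * 60 * 60

print(disk_uptime_seconds(1234))  # 4442400 seconds
```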

    disk_usable_size

    -

    Usable size of each disk, in bytes.

    +

    fcp_nvmf_other_ops

    +

    Number of NVMF operations that are not read or write.

    @@ -27936,14 +31912,20 @@

    disk_usable_sizeUnit: per_sec
    Type: rate
    Base: +

    + + + + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_user_read_blocks

    -

    Number of blocks transferred for user read operations per second

    +

    fcp_nvmf_read_data

    +

    Amount of data read from the storage system (FC-NVMe)

    @@ -27956,20 +31938,20 @@

    disk_user_read_blocksUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcpnvmf.read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituentuser_read_blocks
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_user_read_chain

    -

    Average number of blocks transferred in each user read operation

    +

    fcp_nvmf_read_ops

    +

    Number of FC-NVMe read operations

    @@ -27982,20 +31964,20 @@

    disk_user_read_chainUnit: none
    Type: average
    Base: user_read_count -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcpnvmf.read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituentuser_read_chain
    Unit: none
    Type: average
    Base: user_reads
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_user_read_latency

    -

    Average latency per block in microseconds for user read operations

    +

    fcp_nvmf_remote_caw_data

    +

    Amount of remote CAW data sent to the storage system (FC-NVMe)

    @@ -28008,20 +31990,20 @@

    disk_user_read_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/disk:constituentuser_read_latency
    Unit: microsec
    Type: average
    Base: user_read_block_count
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcpnvmf_remote.caw_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituentuser_read_latency
    Unit: microsec
    Type: average
    Base: user_read_blocks
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_remote_caw_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_user_reads

    -

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests

    +

    fcp_nvmf_remote_caw_ops

    +

    Number of FC-NVMe remote CAW operations

    @@ -28034,20 +32016,20 @@

    disk_user_readsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcpnvmf_remote.caw_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituentuser_reads
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_remote_caw_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_user_write_blocks

    -

    Number of blocks transferred for user write operations per second

    +

    fcp_nvmf_remote_other_ops

    +

    Number of NVMF remote operations that are not read or write.

    @@ -28060,20 +32042,20 @@

    disk_user_write_blocks

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/disk:constituentuser_write_block_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcpnvmf_remote.other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituentuser_write_blocks
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_remote_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_user_write_chain

    -

    Average number of blocks transferred in each user write operation

    +

    fcp_nvmf_remote_read_data

    +

    Amount of remote data read from the storage system (FC-NVMe)

    @@ -28086,20 +32068,20 @@

    disk_user_write_chainUnit: none
    Type: average
    Base: user_write_count -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcpnvmf_remote.read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituentuser_write_chain
    Unit: none
    Type: average
    Base: user_writes
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_remote_read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_user_write_latency

    -

    Average latency per block in microseconds for user write operations

    +

    fcp_nvmf_remote_read_ops

    +

    Number of FC-NVMe remote read operations

    @@ -28112,20 +32094,20 @@

    disk_user_write_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/disk:constituentuser_write_latency
    Unit: microsec
    Type: average
    Base: user_write_block_count
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcpnvmf_remote.read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituentuser_write_latency
    Unit: microsec
    Type: average
    Base: user_write_blocks
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_remote_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    disk_user_writes

    -

    Number of disk write operations initiated each second for storing data or metadata associated with user requests

    +

    fcp_nvmf_remote_total_data

    +

    Amount of remote FC-NVMe traffic to and from the storage system

    @@ -28138,20 +32120,20 @@

    disk_user_writesUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/fcpnvmf_remote.total_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances disk:constituentuser_writes
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances fcp_portnvmf_remote_total_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    environment_sensor_average_ambient_temperature

    -

    Average temperature of all ambient sensors for node in Celsius.

    +

    fcp_nvmf_remote_total_ops

    +

    Total number of remote FC-NVMe operations

    @@ -28164,20 +32146,20 @@

    environment_sensor_avera

    - - - + + + - - - + + +
    RESTNAHarvest generatedconf/rest/9.12.0/sensor.yamlapi/cluster/counter/tables/fcpnvmf_remote.total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPINAHarvest generatedconf/zapi/cdot/9.8.0/sensor.yamlperf-object-get-instances fcp_portnvmf_remote_total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    environment_sensor_average_fan_speed

    -

    Average fan speed for node in rpm.

    +

    fcp_nvmf_remote_write_data

    +

    Amount of remote data written to the storage system (FC-NVMe)

    @@ -28190,20 +32172,20 @@

    environment_sensor_average_fan_spe

    - - - + + + - - - + + +
    RESTNAHarvest generatedconf/rest/9.12.0/sensor.yamlapi/cluster/counter/tables/fcpnvmf_remote.write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPINAHarvest generatedconf/zapi/cdot/9.8.0/sensor.yamlperf-object-get-instances fcp_portnvmf_remote_write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    environment_sensor_average_temperature

    -

    Average temperature of all non-ambient sensors for node in Celsius.

    +

    fcp_nvmf_remote_write_ops

    +

    Number of FC-NVMe remote write operations

    @@ -28216,20 +32198,20 @@

    environment_sensor_average_tempe

    - - - + + + - - - + + +
    RESTNAHarvest generatedconf/rest/9.12.0/sensor.yamlapi/cluster/counter/tables/fcpnvmf_remote.write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPINAHarvest generatedconf/zapi/cdot/9.8.0/sensor.yamlperf-object-get-instances fcp_portnvmf_remote_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    environment_sensor_max_fan_speed

    -

    Maximum fan speed for node in rpm.

    +

    fcp_nvmf_total_data

    +

    Amount of FC-NVMe traffic to and from the storage system

    @@ -28242,20 +32224,20 @@

    environment_sensor_max_fan_speed

    - - - + + + - - - + + +
    RESTNAHarvest generatedconf/rest/9.12.0/sensor.yamlapi/cluster/counter/tables/fcpnvmf.total_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPINAHarvest generatedconf/zapi/cdot/9.8.0/sensor.yamlperf-object-get-instances fcp_portnvmf_total_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    environment_sensor_max_temperature

    -

    Maximum temperature of all non-ambient sensors for node in Celsius.

    +

    fcp_nvmf_total_ops

    +

    Total number of FC-NVMe operations

    @@ -28268,20 +32250,20 @@

    environment_sensor_max_temperature

    - - - + + + - - - + + +
    RESTNAHarvest generatedconf/rest/9.12.0/sensor.yamlapi/cluster/counter/tables/fcpnvmf.total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPINAHarvest generatedconf/zapi/cdot/9.8.0/sensor.yamlperf-object-get-instances fcp_portnvmf_total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    environment_sensor_min_ambient_temperature

    -

    Minimum temperature of all ambient sensors for node in Celsius.

    +

    fcp_nvmf_write_data

    +

    Amount of data written to the storage system (FC-NVMe)

    @@ -28294,20 +32276,20 @@

    environment_sensor_min_ambie

    - - - + + + - - - + + +
    RESTNAHarvest generatedconf/rest/9.12.0/sensor.yamlapi/cluster/counter/tables/fcpnvmf.write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPINAHarvest generatedconf/zapi/cdot/9.8.0/sensor.yamlperf-object-get-instances fcp_portnvmf_write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    environment_sensor_min_fan_speed

    -

    Minimum fan speed for node in rpm.

    +

    fcp_nvmf_write_ops

    +

    Number of FC-NVMe write operations

    @@ -28320,20 +32302,20 @@

    environment_sensor_min_fan_speed

    - - - + + + - - - + + +
    RESTNAHarvest generatedconf/rest/9.12.0/sensor.yamlapi/cluster/counter/tables/fcpnvmf.write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPINAHarvest generatedconf/zapi/cdot/9.8.0/sensor.yamlperf-object-get-instances fcp_portnvmf_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yaml
    -

    environment_sensor_min_temperature

    -

    Minimum temperature of all non-ambient sensors for node in Celsius.

    +

    fcp_other_ops

    +

    Number of operations that are not read or write.

    @@ -28346,20 +32328,20 @@

    environment_sensor_min_temperature

    - - - + + + - - - + + +
    RESTNAHarvest generatedconf/rest/9.12.0/sensor.yamlapi/cluster/counter/tables/fcpother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPINAHarvest generatedconf/zapi/cdot/9.8.0/sensor.yamlperf-object-get-instances fcp_portother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    environment_sensor_power

    -

    Power consumed by a node in Watts.

    +

    fcp_prim_seq_err

    +

    Number of primitive sequence errors

    @@ -28372,20 +32354,20 @@

    environment_sensor_power

    - - - + + + - - - + + +
    RESTNAHarvest generatedconf/rest/9.12.0/sensor.yamlapi/cluster/counter/tables/fcpprimitive_seq_err
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPINAHarvest generatedconf/zapi/cdot/9.8.0/sensor.yamlperf-object-get-instances fcp_portprim_seq_err
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    environment_sensor_threshold_value

    -

    Provides the sensor reading.

    +

    fcp_queue_full

    +

    Number of times a queue full condition occurred.

    @@ -28398,20 +32380,20 @@

    environment_sensor_threshold_value

    - - - + + + - - - + + +
    RESTapi/cluster/sensorsvalueconf/rest/9.12.0/sensor.yamlapi/cluster/counter/tables/fcpqueue_full
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIenvironment-sensors-get-iterenvironment-sensors-info.threshold-sensor-valueconf/zapi/cdot/9.8.0/sensor.yamlperf-object-get-instances fcp_portqueue_full
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -
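fcp_queue_full is a delta-type counter: ONTAP exposes a cumulative count, and the value exported per poll is the difference between the current and previous samples (the ZAPI variant additionally carries a no-zero-values flag). A minimal sketch with made-up numbers:

```python
# Hedged sketch: per-interval value of a delta-type counter such as
# fcp_queue_full. Sample numbers are made up.
def delta_counter(current, previous, skip_zero=False):
    value = current - previous
    if skip_zero and value == 0:
        return None  # mirrors the "no-zero-values" behaviour noted above
    return value

print(delta_counter(1_042, 1_037))                   # 5
print(delta_counter(1_042, 1_042, skip_zero=True))   # None
```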

    external_service_op_num_not_found_responses

    -

    Number of 'Not Found' responses for calls to this operation.

    +

    fcp_read_data

    +

    Amount of data read from the storage system

    @@ -28423,15 +32405,21 @@

external_service_op_num_not_found_responses

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/fcpread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances external_service_opnum_not_found_responses
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/external_service_operation.yamlperf-object-get-instances fcp_portread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -
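The REST rows above point at api/cluster/counter/tables/fcp for counters such as read_data. In the ONTAP REST API these counter tables expose per-instance samples through a rows sub-collection; the sketch below assumes that layout plus a reachable cluster, placeholder credentials, and the requests package.

```python
# Hedged sketch: fetch raw fcp counter rows from the REST counter table named
# above and print a couple of the counters this page documents.
import requests

CLUSTER = "https://cluster.example.com"  # hypothetical management LIF
AUTH = ("admin", "password")             # hypothetical credentials

resp = requests.get(
    f"{CLUSTER}/api/cluster/counter/tables/fcp/rows",
    params={"fields": "counters"},
    auth=AUTH,
    verify=False,  # example only
)
resp.raise_for_status()
for row in resp.json().get("records", []):
    counters = {c["name"]: c.get("value") for c in row.get("counters", [])}
    print(row.get("id"), counters.get("read_data"), counters.get("read_ops"))
```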

    external_service_op_num_request_failures

    -

    A cumulative count of all request failures.

    +

    fcp_read_ops

    +

    Number of read operations

    @@ -28443,15 +32431,21 @@

    external_service_op_num_reques

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/fcpread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances external_service_opnum_request_failures
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/external_service_operation.yamlperf-object-get-instances fcp_portread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -
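fcp_read_ops is a rate-type counter: the raw value is a cumulative operation count, and the per_sec figure is the counter delta divided by the time elapsed between polls. A minimal sketch with made-up samples:

```python
# Hedged sketch: turning a cumulative operation count into the per_sec rate
# reported for rate-type counters such as fcp_read_ops.
def rate_counter(count_now, count_prev, seconds_elapsed):
    if seconds_elapsed <= 0:
        return None
    return (count_now - count_prev) / seconds_elapsed

print(rate_counter(120_500, 118_100, 60))  # 40.0 ops/sec
```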

    external_service_op_num_requests_sent

    -

    Number of requests sent to this service.

    +

    fcp_reset_count

    +

    Number of physical port resets

    @@ -28463,15 +32457,21 @@

    external_service_op_num_requests_

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/fcpreset_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances external_service_opnum_requests_sent
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/external_service_operation.yamlperf-object-get-instances fcp_portreset_count
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    external_service_op_num_responses_received

    -

    Number of responses received from the server (does not include timeouts).

    +

    fcp_shared_int_count

    +

    Number of shared interrupts

    @@ -28483,15 +32483,21 @@

    external_service_op_num_resp

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/fcpshared_interrupt_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances external_service_opnum_responses_received
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/external_service_operation.yamlperf-object-get-instances fcp_portshared_int_count
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    external_service_op_num_successful_responses

    -

    Number of successful responses to this operation.

    +

    fcp_spurious_int_count

    +

    Number of spurious interrupts

    @@ -28503,15 +32509,21 @@

    external_service_op_num_su

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/fcpspurious_interrupt_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances external_service_opnum_successful_responses
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/external_service_operation.yamlperf-object-get-instances fcp_portspurious_int_count
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    external_service_op_num_timeouts

    -

Number of times requests to the server for this operation timed out, meaning no response was received in a given time period.

    +

    fcp_threshold_full

    +

    Number of times the total number of outstanding commands on the port exceeds the threshold supported by this port.

    @@ -28523,15 +32535,21 @@

    external_service_op_num_timeouts

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/fcpthreshold_full
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances external_service_opnum_timeouts
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/external_service_operation.yamlperf-object-get-instances fcp_portthreshold_full
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
    -

    external_service_op_request_latency

    -

    Average latency of requests for operations of this type on this server.

    +

    fcp_total_data

    +

    Amount of FCP traffic to and from the storage system

    @@ -28543,15 +32561,21 @@

    external_service_op_request_latency

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/fcptotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances external_service_oprequest_latency
    Unit: microsec
    Type: average
    Base: num_requests_sent
    conf/zapiperf/cdot/9.8.0/external_service_operation.yamlperf-object-get-instances fcp_porttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yaml
- external_service_op_request_latency_hist: This histogram holds the latency values for requests of this operation to the specified server.
+ fcp_total_ops: Total number of FCP operations

@@ -28563,15 +32587,21 @@ external_service_op_request_la
- ZAPI | perf-object-get-instances external_service_op | request_latency_hist | Unit: none, Type: delta | conf/zapiperf/cdot/9.8.0/external_service_operation.yaml
+ REST | api/cluster/counter/tables/fcp | total_ops | Unit: per_sec, Type: rate | conf/restperf/9.12.0/fcp.yaml
+ ZAPI | perf-object-get-instances fcp_port | total_ops | Unit: per_sec, Type: rate | conf/zapiperf/cdot/9.8.0/fcp.yaml
- fabricpool_average_latency: This counter is deprecated. Average latencies executed during various phases of command execution. The execution-start latency represents the average time taken to start executing an operation. The request-prepare latency represents the average time taken to prepare the complete request that needs to be sent to the server. The send latency represents the average time taken to send requests to the server. The execution-start-to-send-complete latency represents the average time taken to send an operation out since its execution started. The execution-start-to-first-byte-received latency represents the average time taken to receive the first byte of a response since the command's request execution started. These counters can be used to identify performance bottlenecks within the object store client module.
+ fcp_write_data: Amount of data written to the storage system

@@ -28583,15 +32613,21 @@ fabricpool_average_latency
- ZAPI | perf-object-get-instances object_store_client_op | average_latency | Unit: -, Type: -, Base: - | conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml
+ REST | api/cluster/counter/tables/fcp | write_data | Unit: b_per_sec, Type: rate | conf/restperf/9.12.0/fcp.yaml
+ ZAPI | perf-object-get-instances fcp_port | write_data | Unit: b_per_sec, Type: rate | conf/zapiperf/cdot/9.8.0/fcp.yaml
- fabricpool_cloud_bin_op_latency_average: Cloud bin operation latency average in milliseconds.
+ fcp_write_ops: Number of write operations

@@ -28604,20 +32640,20 @@ fabricpool_cloud_bin_op_latency
- REST | api/cluster/counter/tables/wafl_comp_aggr_vol_bin | cloud_bin_op_latency_average | Unit: millisec, Type: raw | conf/restperf/9.12.0/wafl_comp_aggr_vol_bin.yaml
+ REST | api/cluster/counter/tables/fcp | write_ops | Unit: per_sec, Type: rate | conf/restperf/9.12.0/fcp.yaml
- ZAPI | perf-object-get-instances wafl_comp_aggr_vol_bin | cloud_bin_op_latency_average | Unit: millisec, Type: raw,no-zero-values | conf/zapiperf/cdot/9.8.0/wafl_comp_aggr_vol_bin.yaml
+ ZAPI | perf-object-get-instances fcp_port | write_ops | Unit: per_sec, Type: rate | conf/zapiperf/cdot/9.8.0/fcp.yaml
- fabricpool_cloud_bin_operation: Cloud bin operation counters.
+ fcvi_firmware_invalid_crc_count: Firmware reported invalid CRC count

@@ -28630,20 +32666,20 @@ fabricpool_cloud_bin_operation
- REST | api/cluster/counter/tables/wafl_comp_aggr_vol_bin | cloud_bin_operation | Unit: none, Type: delta | conf/restperf/9.12.0/wafl_comp_aggr_vol_bin.yaml
+ REST | api/cluster/counter/tables/fcvi | firmware.invalid_crc_count | Unit: none, Type: delta | conf/restperf/9.12.0/fcvi.yaml
- ZAPI | perf-object-get-instances wafl_comp_aggr_vol_bin | cloud_bin_operation | Unit: none, Type: delta | conf/zapiperf/cdot/9.8.0/wafl_comp_aggr_vol_bin.yaml
+ ZAPI | perf-object-get-instances fcvi | fw_invalid_crc | Unit: none, Type: delta | conf/zapiperf/cdot/9.8.0/fcvi.yaml
- fabricpool_get_throughput_bytes: This counter is deprecated. Counter that indicates the throughput for GET command in bytes per second.
+ fcvi_firmware_invalid_transmit_word_count: Firmware reported invalid transmit word count

@@ -28655,15 +32691,21 @@ fabricpool_get_throughput_bytes
- ZAPI | perf-object-get-instances object_store_client_op | get_throughput_bytes | Unit: -, Type: -, Base: - | conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml
+ REST | api/cluster/counter/tables/fcvi | firmware.invalid_transmit_word_count | Unit: none, Type: delta | conf/restperf/9.12.0/fcvi.yaml
+ ZAPI | perf-object-get-instances fcvi | fw_invalid_xmit_words | Unit: none, Type: delta | conf/zapiperf/cdot/9.8.0/fcvi.yaml
- fabricpool_put_throughput_bytes: This counter is deprecated. Counter that indicates the throughput for PUT command in bytes per second.
+ fcvi_firmware_link_failure_count: Firmware reported link failure count

@@ -28675,15 +32717,21 @@ fabricpool_put_throughput_bytes
- ZAPI | perf-object-get-instances object_store_client_op | put_throughput_bytes | Unit: -, Type: -, Base: - | conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml
+ REST | api/cluster/counter/tables/fcvi | firmware.link_failure_count | Unit: none, Type: delta | conf/restperf/9.12.0/fcvi.yaml
+ ZAPI | perf-object-get-instances fcvi | fw_link_failure | Unit: none, Type: delta | conf/zapiperf/cdot/9.8.0/fcvi.yaml
- fabricpool_stats: This counter is deprecated. Counter that indicates the number of object store operations sent, and their success and failure counts. The objstore_client_op_name array indicates the operation name, such as PUT, GET, etc. The objstore_client_op_stats_name array contains the total number of operations and their success and failure counters for each operation.
+ fcvi_firmware_loss_of_signal_count: Firmware reported loss of signal count

@@ -28695,15 +32743,21 @@ fabricpool_stats
- ZAPI | perf-object-get-instances object_store_client_op | stats | Unit: -, Type: -, Base: - | conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml
+ REST | api/cluster/counter/tables/fcvi | firmware.loss_of_signal_count | Unit: none, Type: delta | conf/restperf/9.12.0/fcvi.yaml
+ ZAPI | perf-object-get-instances fcvi | fw_loss_of_signal | Unit: none, Type: delta | conf/zapiperf/cdot/9.8.0/fcvi.yaml
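The deprecated fabricpool_stats counter described above is an array counter: parallel name arrays (objstore_client_op_name, objstore_client_op_stats_name) label a flat block of values. A sketch of how such arrays could be zipped into labeled values; the arrays and numbers below are hypothetical, not actual ONTAP output:

```python
# Minimal sketch (assumption): expanding an array counter into labeled values.
op_names = ["PUT", "GET", "DELETE"]
stat_names = ["total", "success", "failure"]
raw = [120, 118, 2, 340, 339, 1, 15, 15, 0]  # row-major: one stats row per operation

def expand(names, stats, values):
    it = iter(values)
    return {(op, st): next(it) for op in names for st in stats}

if __name__ == "__main__":
    labeled = expand(op_names, stat_names, raw)
    print(labeled[("GET", "success")])  # -> 339
```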
- fabricpool_throughput_ops: Counter that indicates the throughput for commands in ops per second.
+ fcvi_firmware_loss_of_sync_count: Firmware reported loss of sync count

@@ -28715,15 +32769,21 @@ fabricpool_throughput_ops
- ZAPI | perf-object-get-instances object_store_client_op | throughput_ops | Unit: per_sec, Type: rate,no-zero-values | conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml
+ REST | api/cluster/counter/tables/fcvi | firmware.loss_of_sync_count | Unit: none, Type: delta | conf/restperf/9.12.0/fcvi.yaml
+ ZAPI | perf-object-get-instances fcvi | fw_loss_of_sync | Unit: none, Type: delta | conf/zapiperf/cdot/9.8.0/fcvi.yaml
- fcp_avg_other_latency: Average latency for operations other than read and write
+ fcvi_firmware_systat_discard_frames: Firmware reported SyStatDiscardFrames value

@@ -28736,20 +32796,20 @@ fcp_avg_other_latency
- REST | api/cluster/counter/tables/fcp | … | Unit: microsec, Type: average, Base: other_ops | conf/restperf/9.12.0/fcp.yaml
+ REST | api/cluster/counter/tables/fcvi | firmware.systat.discard_frames | Unit: none, Type: delta | conf/restperf/9.12.0/fcvi.yaml
- ZAPI | perf-object-get-instances fcp_port | avg_other_latency | Unit: microsec, Type: average, Base: other_ops | conf/zapiperf/cdot/9.8.0/fcp.yaml
+ ZAPI | perf-object-get-instances fcvi | fw_SyStatDiscardFrames | Unit: none, Type: delta | conf/zapiperf/cdot/9.8.0/fcvi.yaml
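Counters such as fcp_avg_other_latency carry Type: average with a Base counter (other_ops): the reported value is the change in the latency accumulator divided by the change in the base counter over the same interval. A sketch of that calculation, with hypothetical numbers:

```python
# Minimal sketch (assumption): a "Type: average" counter divides the change in its
# accumulator by the change in its Base counter, here microseconds per operation.
def average(num_prev: int, num_curr: int, base_prev: int, base_curr: int) -> float:
    ops = base_curr - base_prev
    return (num_curr - num_prev) / ops if ops > 0 else 0.0

if __name__ == "__main__":
    # hypothetical accumulated latency (microsec) and op counts, one interval apart
    print(average(num_prev=500_000, num_curr=620_000, base_prev=1_000, base_curr=1_400))  # 300.0
```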
    -

    fcp_avg_read_latency

    -

    Average latency for read operations

    +

    fcvi_hard_reset_count

    +

    Number of times hard reset of FCVI adapter got issued.

    @@ -28762,20 +32822,20 @@

    fcp_avg_read_latencyUnit: microsec
    Type: average
    Base: read_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/fcvihard_reset_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcvi.yaml
    ZAPIperf-object-get-instances fcp_portavg_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances fcvihard_reset_cnt
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcvi.yaml
    -

    fcp_avg_write_latency

    -

    Average latency for write operations

    +

    fcvi_rdma_write_avg_latency

    +

    Average RDMA write I/O latency.

    @@ -28788,20 +32848,20 @@

    fcp_avg_write_latencyUnit: microsec
    Type: average
    Base: write_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/fcvirdma.write_average_latency
    Unit: microsec
    Type: average
    Base: rdma.write_ops
    conf/restperf/9.12.0/fcvi.yaml
    ZAPIperf-object-get-instances fcp_portavg_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances fcvirdma_write_avg_latency
    Unit: microsec
    Type: average
    Base: rdma_write_ops
    conf/zapiperf/cdot/9.8.0/fcvi.yaml
    -

    fcp_discarded_frames_count

    -

    Number of discarded frames.

    +

    fcvi_rdma_write_ops

    +

    Number of RDMA write I/Os issued per second.

    @@ -28814,20 +32874,20 @@

    fcp_discarded_frames_countUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/fcvirdma.write_ops
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/fcvi.yaml
    ZAPIperf-object-get-instances fcp_portdiscarded_frames_count
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances fcvirdma_write_ops
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcvi.yaml
    -

    fcp_fabric_connected_speed

    -

    The negotiated data rate between the target FC port and the fabric in gigabits per second.

    +

    fcvi_rdma_write_throughput

    +

    RDMA write throughput in bytes per second.

    @@ -28840,14 +32900,20 @@

    fcp_fabric_connected_speedUnit: b_per_sec
    Type: rate
    Base: +

    + + + + + +
    conf/restperf/9.12.0/fcvi.yaml
    ZAPIperf-object-get-instances fcvirdma_write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcvi.yaml
    -

    fcp_int_count

    -

    Number of interrupts

    +

    fcvi_soft_reset_count

    +

    Number of times soft reset of FCVI adapter got issued.

    @@ -28860,20 +32926,20 @@

    fcp_int_count

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcpinterrupt_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/fcvisoft_reset_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcvi.yaml
    ZAPIperf-object-get-instances fcp_portint_count
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances fcvisoft_reset_cnt
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcvi.yaml
    -

    fcp_invalid_crc

    -

    Number of invalid cyclic redundancy checks (CRC count)

    +

    flashcache_accesses

    +

    External cache accesses per second

    @@ -28886,20 +32952,20 @@

    fcp_invalid_crcUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/external_cacheaccesses
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yaml
    ZAPIperf-object-get-instances fcp_portinvalid_crc
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances ext_cache_objaccesses
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
    -

    fcp_invalid_transmission_word

    -

    Number of invalid transmission words

    +

    flashcache_disk_reads_replaced

    +

    Estimated number of disk reads per second replaced by cache

    @@ -28912,20 +32978,20 @@

    fcp_invalid_transmission_wordUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/external_cachedisk_reads_replaced
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yaml
    ZAPIperf-object-get-instances fcp_portinvalid_transmission_word
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances ext_cache_objdisk_reads_replaced
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
    -

    fcp_isr_count

    -

    Number of interrupt responses

    +

    flashcache_evicts

    +

    Number of blocks evicted from the external cache to make room for new blocks

    @@ -28938,20 +33004,20 @@

    fcp_isr_count

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcpisr.count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/external_cacheevicts
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yaml
    ZAPIperf-object-get-instances fcp_portisr_count
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances ext_cache_objevicts
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
    -

    fcp_lif_avg_latency

    -

    Average latency for FCP operations

    +

    flashcache_hit

    +

    Number of WAFL buffers served off the external cache

    @@ -28964,20 +33030,20 @@

    fcp_lif_avg_latencyUnit: microsec
    Type: average
    Base: total_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp_lif.yamlapi/cluster/counter/tables/external_cachehit.total
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yaml
    ZAPIperf-object-get-instances fcp_lifavg_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/zapiperf/cdot/9.8.0/fcp_lif.yamlperf-object-get-instances ext_cache_objhit
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
    -

    fcp_lif_avg_other_latency

    -

    Average latency for operations other than read and write

    +

    flashcache_hit_directory

    +

    Number of directory buffers served off the external cache

    @@ -28990,20 +33056,20 @@

    fcp_lif_avg_other_latencyUnit: microsec
    Type: average
    Base: other_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp_lif.yamlapi/cluster/counter/tables/external_cachehit.directory
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yaml
    ZAPIperf-object-get-instances fcp_lifavg_other_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/zapiperf/cdot/9.8.0/fcp_lif.yamlperf-object-get-instances ext_cache_objhit_directory
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
    -

    fcp_lif_avg_read_latency

    -

    Average latency for read operations

    +

    flashcache_hit_indirect

    +

    Number of indirect file buffers served off the external cache

    @@ -29016,20 +33082,20 @@

    fcp_lif_avg_read_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcp_lifaverage_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/restperf/9.12.0/fcp_lif.yamlapi/cluster/counter/tables/external_cachehit.indirect
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yaml
    ZAPIperf-object-get-instances fcp_lifavg_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/fcp_lif.yamlperf-object-get-instances ext_cache_objhit_indirect
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
    -

    fcp_lif_avg_write_latency

    -

    Average latency for write operations

    +

    flashcache_hit_metadata_file

    +

    Number of metadata file buffers served off the external cache

    @@ -29042,20 +33108,20 @@

    fcp_lif_avg_write_latencyUnit: microsec
    Type: average
    Base: write_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp_lif.yamlapi/cluster/counter/tables/external_cachehit.metadata_file
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yaml
    ZAPIperf-object-get-instances fcp_lifavg_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/fcp_lif.yamlperf-object-get-instances ext_cache_objhit_metadata_file
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
    -

    fcp_lif_other_ops

    -

    Number of operations that are not read or write.

    +

    flashcache_hit_normal_lev0

    +

    Number of normal level 0 WAFL buffers served off the external cache

    @@ -29068,20 +33134,20 @@

    fcp_lif_other_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp_lif.yamlapi/cluster/counter/tables/external_cachehit.normal_level_zero
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yaml
    ZAPIperf-object-get-instances fcp_lifother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp_lif.yamlperf-object-get-instances ext_cache_objhit_normal_lev0
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
- fcp_lif_read_data: Amount of data read from the storage system
+ flashcache_hit_percent: External cache hit rate

@@ -29094,20 +33160,20 @@ fcp_lif_read_data
- REST | api/cluster/counter/tables/fcp_lif | … | Unit: b_per_sec, Type: rate | conf/restperf/9.12.0/fcp_lif.yaml
+ REST | api/cluster/counter/tables/external_cache | hit.percent | Unit: percent, Type: average, Base: accesses | conf/restperf/9.12.0/ext_cache_obj.yaml
- ZAPI | perf-object-get-instances fcp_lif | read_data | Unit: b_per_sec, Type: rate | conf/zapiperf/cdot/9.8.0/fcp_lif.yaml
+ ZAPI | perf-object-get-instances ext_cache_obj | hit_percent | Unit: percent, Type: percent, Base: accesses | conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
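flashcache_hit_percent is a percent-style counter with Base: accesses, so the reported value is the hit delta expressed as a percentage of the access delta over the same interval. A sketch, assuming that interpretation:

```python
# Minimal sketch (assumption): a percent counter with a Base, e.g. flashcache_hit_percent.
def percent(part_prev: int, part_curr: int, base_prev: int, base_curr: int) -> float:
    base = base_curr - base_prev
    return 100.0 * (part_curr - part_prev) / base if base > 0 else 0.0

if __name__ == "__main__":
    # hypothetical hit.total and accesses samples taken one poll apart
    print(percent(8_000, 9_600, 10_000, 12_000))  # -> 80.0 (% of accesses served from cache)
```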
    -

    fcp_lif_read_ops

    -

    Number of read operations

    +

    flashcache_inserts

    +

    Number of WAFL buffers inserted into the external cache

    @@ -29120,20 +33186,20 @@

    fcp_lif_read_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp_lif.yamlapi/cluster/counter/tables/external_cacheinserts
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yaml
    ZAPIperf-object-get-instances fcp_lifread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp_lif.yamlperf-object-get-instances ext_cache_objinserts
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
    -

    fcp_lif_total_ops

    -

    Total number of operations.

    +

    flashcache_invalidates

    +

    Number of blocks invalidated in the external cache

    @@ -29146,20 +33212,20 @@

    fcp_lif_total_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp_lif.yamlapi/cluster/counter/tables/external_cacheinvalidates
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yaml
    ZAPIperf-object-get-instances fcp_liftotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp_lif.yamlperf-object-get-instances ext_cache_objinvalidates
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
    -

    fcp_lif_write_data

    -

    Amount of data written to the storage system

    +

    flashcache_miss

    +

    External cache misses

    @@ -29172,20 +33238,20 @@

    fcp_lif_write_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp_lif.yamlapi/cluster/counter/tables/external_cachemiss.total
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yaml
    ZAPIperf-object-get-instances fcp_lifwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp_lif.yamlperf-object-get-instances ext_cache_objmiss
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
    -

    fcp_lif_write_ops

    -

    Number of write operations

    +

    flashcache_miss_directory

    +

    External cache misses accessing directory buffers

    @@ -29198,20 +33264,20 @@

    fcp_lif_write_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp_lif.yamlapi/cluster/counter/tables/external_cachemiss.directory
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yaml
    ZAPIperf-object-get-instances fcp_lifwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp_lif.yamlperf-object-get-instances ext_cache_objmiss_directory
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
- fcp_link_down: Number of times the Fibre Channel link was lost
+ flashcache_miss_indirect: External cache misses accessing indirect file buffers

@@ -29224,20 +33290,20 @@
- REST | api/cluster/counter/tables/fcp | link.down | Unit: none, Type: delta | conf/restperf/9.12.0/fcp.yaml
+ REST | api/cluster/counter/tables/external_cache | miss.indirect | Unit: per_sec, Type: rate | conf/restperf/9.12.0/ext_cache_obj.yaml
- ZAPI | perf-object-get-instances fcp_port | link_down | Unit: none, Type: delta | conf/zapiperf/cdot/9.8.0/fcp.yaml
+ ZAPI | perf-object-get-instances ext_cache_obj | miss_indirect | Unit: per_sec, Type: rate | conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml

- fcp_link_failure: Number of link failures
+ flashcache_miss_metadata_file: External cache misses accessing metadata file buffers

@@ -29250,20 +33316,20 @@
- REST | api/cluster/counter/tables/fcp | … | conf/restperf/9.12.0/fcp.yaml
+ REST | api/cluster/counter/tables/external_cache | miss.metadata_file | Unit: per_sec, Type: rate | conf/restperf/9.12.0/ext_cache_obj.yaml
- ZAPI | perf-object-get-instances fcp_port | link_failure | Unit: none, Type: delta | conf/zapiperf/cdot/9.8.0/fcp.yaml
+ ZAPI | perf-object-get-instances ext_cache_obj | miss_metadata_file | Unit: per_sec, Type: rate | conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml

- fcp_link_up: Number of times the Fibre Channel link was established
+ flashcache_miss_normal_lev0: External cache misses accessing normal level 0 buffers

@@ -29276,20 +33342,20 @@
- REST | api/cluster/counter/tables/fcp | link.up | Unit: none, Type: delta | conf/restperf/9.12.0/fcp.yaml
+ REST | api/cluster/counter/tables/external_cache | miss.normal_level_zero | Unit: per_sec, Type: rate | conf/restperf/9.12.0/ext_cache_obj.yaml
- ZAPI | perf-object-get-instances fcp_port | link_up | Unit: none, Type: delta | conf/zapiperf/cdot/9.10.1/fcp.yaml
+ ZAPI | perf-object-get-instances ext_cache_obj | miss_normal_lev0 | Unit: per_sec, Type: rate | conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
    -

    fcp_loss_of_signal

    -

    Number of times this port lost signal

    +

    flashcache_usage

    +

    Percentage of blocks in external cache currently containing valid data

    @@ -29302,20 +33368,20 @@

    fcp_loss_of_signalUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/external_cacheusage
    Unit: percent
    Type: raw
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yaml
    ZAPIperf-object-get-instances fcp_portloss_of_signal
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances ext_cache_objusage
    Unit: percent
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml
    -

    fcp_loss_of_sync

    -

    Number of times this port lost sync

    +

    flashpool_cache_stats

    +

    Automated Working-set Analyzer (AWA) per-interval pseudo cache statistics for the most recent intervals. The number of intervals defined as recent is CM_WAFL_HYAS_INT_DIS_CNT. This array is a table with fields corresponding to the enum type of hyas_cache_stat_type_t.

    @@ -29328,20 +33394,20 @@

    fcp_loss_of_syncUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_sizercache_stats
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/wafl_hya_sizer.yaml
    ZAPIperf-object-get-instances fcp_portloss_of_sync
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances wafl_hya_sizercache_stats
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_sizer.yaml
- fcp_max_speed: The maximum speed supported by the FC port in gigabits per second.
+ flashpool_evict_destage_rate: Number of block destages per second.

@@ -29354,14 +33420,20 @@ fcp_max_speed
- REST | api/network/fc/ports | speed.maximum | conf/rest/9.6.0/fcp.yaml
+ REST | api/cluster/counter/tables/wafl_hya_per_aggregate | evict_destage_rate | Unit: per_sec, Type: rate | conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
+ ZAPI | perf-object-get-instances wafl_hya_per_aggr | evict_destage_rate | Unit: per_sec, Type: rate | conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
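Unlike the perf counters around it, fcp_max_speed comes from a plain REST configuration endpoint (api/network/fc/ports, field speed.maximum). A hedged sketch of querying that field directly; the host, credentials, and TLS handling below are placeholders, and this is not a supported client:

```python
# Minimal sketch (assumption): reading the non-performance REST field behind fcp_max_speed.
import requests

def fc_port_max_speeds(host: str, user: str, password: str):
    resp = requests.get(
        f"https://{host}/api/network/fc/ports",
        params={"fields": "speed.maximum"},
        auth=(user, password),
        verify=False,  # lab-only shortcut; use proper CA verification in production
        timeout=30,
    )
    resp.raise_for_status()
    return [(rec.get("name"), rec.get("speed", {}).get("maximum"))
            for rec in resp.json().get("records", [])]

if __name__ == "__main__":
    print(fc_port_max_speeds("cluster.example.com", "admin", "password"))
```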
    -

    fcp_nvmf_avg_other_latency

    -

    Average latency for operations other than read and write (FC-NVMe)

    +

    flashpool_evict_remove_rate

    +

    Number of block free per second.

    @@ -29374,20 +33446,20 @@

    fcp_nvmf_avg_other_latencyUnit: microsec
    Type: average
    Base: nvmf.other_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregateevict_remove_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_avg_other_latency
    Unit: microsec
    Type: average
    Base: nvmf_other_ops
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrevict_remove_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
    -

    fcp_nvmf_avg_read_latency

    -

    Average latency for read operations (FC-NVMe)

    +

    flashpool_hya_read_hit_latency_average

    +

    Average of RAID I/O latency on read hit.

    @@ -29400,20 +33472,20 @@

    fcp_nvmf_avg_read_latencyUnit: microsec
    Type: average
    Base: nvmf.read_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregatehya_read_hit_latency_average
    Unit: millisec
    Type: average
    Base: hya_read_hit_latency_count
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_avg_read_latency
    Unit: microsec
    Type: average
    Base: nvmf_read_ops
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrhya_read_hit_latency_average
    Unit: millisec
    Type: average
    Base: hya_read_hit_latency_count
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
    -

    fcp_nvmf_avg_remote_other_latency

    -

    Average latency for remote operations other than read and write (FC-NVMe)

    +

    flashpool_hya_read_miss_latency_average

    +

    Average read miss latency.

    @@ -29426,20 +33498,20 @@

    fcp_nvmf_avg_remote_other_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcpnvmf.average_remote_other_latency
    Unit: microsec
    Type: average
    Base: nvmf_remote.other_ops
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregatehya_read_miss_latency_average
    Unit: millisec
    Type: average
    Base: hya_read_miss_latency_count
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_avg_remote_other_latency
    Unit: microsec
    Type: average
    Base: nvmf_remote_other_ops
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrhya_read_miss_latency_average
    Unit: millisec
    Type: average
    Base: hya_read_miss_latency_count
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
    -

    fcp_nvmf_avg_remote_read_latency

    -

    Average latency for remote read operations (FC-NVMe)

    +

    flashpool_hya_write_hdd_latency_average

    +

    Average write latency to HDD.

    @@ -29452,20 +33524,20 @@

    fcp_nvmf_avg_remote_read_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcpnvmf.average_remote_read_latency
    Unit: microsec
    Type: average
    Base: nvmf_remote.read_ops
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregatehya_write_hdd_latency_average
    Unit: millisec
    Type: average
    Base: hya_write_hdd_latency_count
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_avg_remote_read_latency
    Unit: microsec
    Type: average
    Base: nvmf_remote_read_ops
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrhya_write_hdd_latency_average
    Unit: millisec
    Type: average
    Base: hya_write_hdd_latency_count
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
    -

    fcp_nvmf_avg_remote_write_latency

    -

    Average latency for remote write operations (FC-NVMe)

    +

    flashpool_hya_write_ssd_latency_average

    +

    Average of RAID I/O latency on write to SSD.

    @@ -29478,20 +33550,20 @@

    fcp_nvmf_avg_remote_write_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcpnvmf.average_remote_write_latency
    Unit: microsec
    Type: average
    Base: nvmf_remote.write_ops
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregatehya_write_ssd_latency_average
    Unit: millisec
    Type: average
    Base: hya_write_ssd_latency_count
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_avg_remote_write_latency
    Unit: microsec
    Type: average
    Base: nvmf_remote_write_ops
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrhya_write_ssd_latency_average
    Unit: millisec
    Type: average
    Base: hya_write_ssd_latency_count
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
    -

    fcp_nvmf_avg_write_latency

    -

    Average latency for write operations (FC-NVMe)

    +

    flashpool_read_cache_ins_rate

    +

    Cache insert rate blocks/sec.

    @@ -29504,20 +33576,20 @@

    fcp_nvmf_avg_write_latencyUnit: microsec
    Type: average
    Base: nvmf.write_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregateread_cache_insert_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_avg_write_latency
    Unit: microsec
    Type: average
    Base: nvmf_write_ops
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrread_cache_ins_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
    -

    fcp_nvmf_caw_data

    -

    Amount of CAW data sent to the storage system (FC-NVMe)

    +

    flashpool_read_ops_replaced

    +

    Number of HDD read operations replaced by SSD reads per second.

    @@ -29530,20 +33602,20 @@

    fcp_nvmf_caw_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregateread_ops_replaced
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_caw_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrread_ops_replaced
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
- fcp_nvmf_caw_ops: Number of FC-NVMe CAW operations
+ flashpool_read_ops_replaced_percent: Percentage of HDD read operations replaced by SSD.

@@ -29556,20 +33628,20 @@ fcp_nvmf_caw_ops
- REST | api/cluster/counter/tables/fcp | … | Unit: per_sec, Type: rate | conf/restperf/9.12.0/fcp.yaml
+ REST | api/cluster/counter/tables/wafl_hya_per_aggregate | read_ops_replaced_percent | Unit: percent, Type: percent, Base: read_ops_total | conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
- ZAPI | perf-object-get-instances fcp_port | nvmf_caw_ops | Unit: per_sec, Type: rate | conf/zapiperf/cdot/9.10.1/fcp.yaml
+ ZAPI | perf-object-get-instances wafl_hya_per_aggr | read_ops_replaced_percent | Unit: percent, Type: percent, Base: read_ops_total | conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
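The ZAPI rows above are collected with perf-object-get-instances against the named object (here wafl_hya_per_aggr). A sketch of what such a request body might look like, assuming the usual ZAPIperf XML shape; transport and authentication are omitted, and the exact envelope may differ by ONTAP version:

```python
# Minimal sketch (assumption): building a perf-object-get-instances request body for the
# counter above and its Base counter. Only the XML payload is shown.
import xml.etree.ElementTree as ET

def build_perf_request(obj: str, counters: list[str]) -> bytes:
    root = ET.Element("netapp", version="1.7", xmlns="http://www.netapp.com/filer/admin")
    req = ET.SubElement(root, "perf-object-get-instances")
    ET.SubElement(req, "objectname").text = obj
    counters_el = ET.SubElement(req, "counters")
    for c in counters:
        ET.SubElement(counters_el, "counter").text = c
    return ET.tostring(root, xml_declaration=True, encoding="utf-8")

if __name__ == "__main__":
    print(build_perf_request("wafl_hya_per_aggr",
                             ["read_ops_replaced_percent", "read_ops_total"]).decode())
```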
    -

    fcp_nvmf_command_slots

    -

    Number of command slots that have been used by initiators logging into this port. This shows the command fan-in on the port.

    +

    flashpool_ssd_available

    +

    Total SSD blocks available.

    @@ -29582,20 +33654,20 @@

    fcp_nvmf_command_slots

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcpnvmf.command_slots
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregatessd_available
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_command_slots
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrssd_available
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
    -

    fcp_nvmf_other_ops

    -

    Number of NVMF operations that are not read or write.

    +

    flashpool_ssd_read_cached

    +

    Total read cached SSD blocks.

    @@ -29608,20 +33680,20 @@

    fcp_nvmf_other_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregatessd_read_cached
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrssd_read_cached
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
    -

    fcp_nvmf_read_data

    -

    Amount of data read from the storage system (FC-NVMe)

    +

    flashpool_ssd_total

    +

    Total SSD blocks.

    @@ -29634,20 +33706,20 @@

    fcp_nvmf_read_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregatessd_total
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrssd_total
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
    -

    fcp_nvmf_read_ops

    -

    Number of FC-NVMe read operations

    +

    flashpool_ssd_total_used

    +

    Total SSD blocks used.

    @@ -29660,20 +33732,20 @@

    fcp_nvmf_read_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregatessd_total_used
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrssd_total_used
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
    -

    fcp_nvmf_remote_caw_data

    -

    Amount of remote CAW data sent to the storage system (FC-NVMe)

    +

    flashpool_ssd_write_cached

    +

    Total write cached SSD blocks.

    @@ -29686,20 +33758,20 @@

    fcp_nvmf_remote_caw_data

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcpnvmf_remote.caw_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregatessd_write_cached
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_remote_caw_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrssd_write_cached
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
    -

    fcp_nvmf_remote_caw_ops

    -

    Number of FC-NVMe remote CAW operations

    +

    flashpool_wc_write_blks_total

    +

    Number of write-cache blocks written per second.

    @@ -29712,20 +33784,20 @@

    fcp_nvmf_remote_caw_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcpnvmf_remote.caw_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregatewc_write_blocks_total
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_remote_caw_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrwc_write_blks_total
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
    -

    fcp_nvmf_remote_other_ops

    -

    Number of NVMF remote operations that are not read or write.

    +

    flashpool_write_blks_replaced

    +

    Number of HDD write blocks replaced by SSD writes per second.

    @@ -29738,20 +33810,20 @@

    fcp_nvmf_remote_other_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregatewrite_blocks_replaced
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_remote_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrwrite_blks_replaced
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
    -

    fcp_nvmf_remote_read_data

    -

    Amount of remote data read from the storage system (FC-NVMe)

    +

    flashpool_write_blks_replaced_percent

    +

    Percentage of blocks overwritten to write-cache among all disk writes.

    @@ -29764,20 +33836,20 @@

    fcp_nvmf_remote_read_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/wafl_hya_per_aggregatewrite_blocks_replaced_percent
    Unit: percent
    Type: average
    Base: estimated_write_blocks_total
    conf/restperf/9.12.0/wafl_hya_per_aggr.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_remote_read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances wafl_hya_per_aggrwrite_blks_replaced_percent
    Unit: percent
    Type: average
    Base: est_write_blks_total
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml
    -

    fcp_nvmf_remote_read_ops

    -

    Number of FC-NVMe remote read operations

    +

    flexcache_blocks_requested_from_client

    +

    Total number of blocks requested from client

    @@ -29789,21 +33861,15 @@

    fcp_nvmf_remote_read_ops

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/fcpnvmf_remote.read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_remote_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances flexcache_per_volumeblocks_requested_from_client
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
    -

    fcp_nvmf_remote_total_data

    -

    Amount of remote FC-NVMe traffic to and from the storage system

    +

    flexcache_blocks_retrieved_from_origin

    +

    Total number of blocks retrieved from origin

    @@ -29815,21 +33881,15 @@

    fcp_nvmf_remote_total_dataUnit: b_per_sec
    Type: rate
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_remote_total_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances flexcache_per_volumeblocks_retrieved_from_origin
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
    -

    fcp_nvmf_remote_total_ops

    -

    Total number of remote FC-NVMe operations

    +

    flexcache_evict_rw_cache_skipped_reason_disconnected

    +

    Total number of read-write cache evict operations skipped because cache is disconnected.

    @@ -29841,21 +33901,15 @@

    fcp_nvmf_remote_total_opsUnit: per_sec
    Type: rate
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_remote_total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances flexcache_per_volumeevict_rw_cache_skipped_reason_disconnected
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
    -

    fcp_nvmf_remote_write_data

    -

    Amount of remote data written to the storage system (FC-NVMe)

    +

    flexcache_evict_skipped_reason_config_noent

    +

    Total number of evict operation is skipped because cache config is not available.

    @@ -29867,21 +33921,15 @@

    fcp_nvmf_remote_write_dataUnit: b_per_sec
    Type: rate
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_remote_write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances flexcache_per_volumeevict_skipped_reason_config_noent
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
    -

    fcp_nvmf_remote_write_ops

    -

    Number of FC-NVMe remote write operations

    +

    flexcache_evict_skipped_reason_disconnected

    +

    Total number of evict operation is skipped because cache is disconnected.

    @@ -29893,21 +33941,15 @@

    fcp_nvmf_remote_write_opsUnit: per_sec
    Type: rate
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_remote_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances flexcache_per_volumeevict_skipped_reason_disconnected
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
    -

    fcp_nvmf_total_data

    -

    Amount of FC-NVMe traffic to and from the storage system

    +

    flexcache_evict_skipped_reason_offline

    +

    Total number of evict operation is skipped because cache volume is offline.

    @@ -29919,21 +33961,15 @@

    fcp_nvmf_total_dataUnit: b_per_sec
    Type: rate
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_total_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances flexcache_per_volumeevict_skipped_reason_offline
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
    -

    fcp_nvmf_total_ops

    -

    Total number of FC-NVMe operations

    +

    flexcache_invalidate_skipped_reason_config_noent

    +

    Total number of invalidate operation is skipped because cache config is not available.

    @@ -29945,21 +33981,15 @@

    fcp_nvmf_total_opsUnit: per_sec
    Type: rate
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances flexcache_per_volumeinvalidate_skipped_reason_config_noent
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
    -

    fcp_nvmf_write_data

    -

    Amount of data written to the storage system (FC-NVMe)

    +

    flexcache_invalidate_skipped_reason_disconnected

    +

    Total number of invalidate operation is skipped because cache is disconnected.

    @@ -29971,21 +34001,15 @@

    fcp_nvmf_write_dataUnit: b_per_sec
    Type: rate
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances flexcache_per_volumeinvalidate_skipped_reason_disconnected
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
    -

    fcp_nvmf_write_ops

    -

    Number of FC-NVMe write operations

    +

    flexcache_invalidate_skipped_reason_offline

    +

    Total number of invalidate operation is skipped because cache volume is offline.

    @@ -29997,21 +34021,15 @@

    fcp_nvmf_write_opsUnit: per_sec
    Type: rate
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portnvmf_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/fcp.yamlperf-object-get-instances flexcache_per_volumeinvalidate_skipped_reason_offline
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
- fcp_other_ops: Number of operations that are not read or write.
+ flexcache_miss_percent: This metric represents the percentage of block requests from a client that resulted in a "miss" in the FlexCache. A "miss" occurs when the requested data is not found in the cache and has to be retrieved from the origin volume.

@@ -30023,21 +34041,15 @@ fcp_other_ops
- REST | api/cluster/counter/tables/fcp | other_ops | Unit: per_sec, Type: rate | conf/restperf/9.12.0/fcp.yaml
- ZAPI | perf-object-get-instances fcp_port | other_ops | Unit: per_sec, Type: rate | conf/zapiperf/cdot/9.8.0/fcp.yaml
+ ZAPI | flexcache_per_volume | blocks_retrieved_from_origin, blocks_requested_from_client | Unit: -, Type: -, Base: - | conf/zapiperf/cdot/9.8.0/flexcache.yaml
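flexcache_miss_percent is listed above as being built from two flexcache_per_volume counters rather than collected directly. Assuming the natural derivation (origin retrievals as a share of client requests over one interval), a sketch:

```python
# Minimal sketch (assumption): deriving a FlexCache miss percentage from the two base
# counters named in the row above, using per-interval deltas.
def miss_percent(retrieved_from_origin: int, requested_from_client: int) -> float:
    if requested_from_client <= 0:
        return 0.0
    return 100.0 * retrieved_from_origin / requested_from_client

if __name__ == "__main__":
    # hypothetical per-interval deltas of the two base counters
    print(miss_percent(retrieved_from_origin=150, requested_from_client=1_000))  # -> 15.0
```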
    -

    fcp_prim_seq_err

    -

    Number of primitive sequence errors

    +

    flexcache_nix_retry_skipped_reason_initiator_retrieve

    +

    Total retry nix operations skipped because the initiator is retrieve operation.

    @@ -30049,21 +34061,15 @@

    fcp_prim_seq_errUnit: none
    Type: delta
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portprim_seq_err
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances flexcache_per_volumenix_retry_skipped_reason_initiator_retrieve
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
    -

    fcp_queue_full

    -

    Number of times a queue full condition occurred.

    +

    flexcache_nix_skipped_reason_config_noent

    +

    Total number of nix operation is skipped because cache config is not available.

    @@ -30075,21 +34081,15 @@

    fcp_queue_fullUnit: none
    Type: delta
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portqueue_full
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances flexcache_per_volumenix_skipped_reason_config_noent
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
    -

    fcp_read_data

    -

    Amount of data read from the storage system

    +

    flexcache_nix_skipped_reason_disconnected

    +

    Total number of nix operation is skipped because cache is disconnected.

    @@ -30101,21 +34101,15 @@

    fcp_read_data

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/fcpread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances flexcache_per_volumenix_skipped_reason_disconnected
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
    -

    fcp_read_ops

    -

    Number of read operations

    +

    flexcache_nix_skipped_reason_in_progress

    +

    Total nix operations skipped because of an in-progress nix.

    @@ -30127,21 +34121,15 @@

    fcp_read_ops

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/fcpread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances flexcache_per_volumenix_skipped_reason_in_progress
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
    -

    fcp_reset_count

    -

    Number of physical port resets

    +

    flexcache_nix_skipped_reason_offline

    +

    Total number of nix operation is skipped because cache volume is offline.

    @@ -30153,21 +34141,15 @@

    fcp_reset_countUnit: none
    Type: delta
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portreset_count
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances flexcache_per_volumenix_skipped_reason_offline
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
    -

    fcp_shared_int_count

    -

    Number of shared interrupts

    +

    flexcache_reconciled_data_entries

    +

    Total number of reconciled data entries at cache side.

    @@ -30179,21 +34161,15 @@

    fcp_shared_int_countUnit: none
    Type: delta
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portshared_int_count
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances flexcache_per_volumereconciled_data_entries
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
    -

    fcp_spurious_int_count

    -

    Number of spurious interrupts

    +

    flexcache_reconciled_lock_entries

    +

    Total number of reconciled lock entries at cache side.

    @@ -30205,21 +34181,15 @@

    fcp_spurious_int_count

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/fcpspurious_interrupt_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcp.yaml
    ZAPIperf-object-get-instances fcp_portspurious_int_count
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances flexcache_per_volumereconciled_lock_entries
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yaml
- fcp_threshold_full: Number of times the total number of outstanding commands on the port exceeds the threshold supported by this port.
+ flexcache_size: Physical size of the FlexCache. The recommended size for a FlexCache is 10% of the origin volume. The minimum FlexCache constituent size is 1GB.

@@ -30232,20 +34202,20 @@ fcp_threshold_full
- REST | api/cluster/counter/tables/fcp | threshold_full | Unit: none, Type: delta | conf/restperf/9.12.0/fcp.yaml
+ REST | api/storage/flexcache/flexcaches | size | conf/rest/9.12.0/flexcache.yaml
- ZAPI | perf-object-get-instances fcp_port | threshold_full | Unit: none, Type: delta,no-zero-values | conf/zapiperf/cdot/9.8.0/fcp.yaml
+ ZAPI | flexcache-get-iter | flexcache-info.size | conf/zapi/cdot/9.8.0/flexcache.yaml
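The sizing guidance in the flexcache_size description (10% of the origin volume, at least 1GB per constituent) can be turned into a quick estimate. The constituent count below is a hypothetical input; consult the FlexCache documentation for actual layout rules:

```python
# Minimal sketch of the sizing guidance stated above, not an official sizing tool.
GIB = 1024 ** 3

def recommended_flexcache_bytes(origin_bytes: int, constituents: int = 1) -> int:
    recommended = origin_bytes // 10   # 10% of the origin volume
    minimum = constituents * GIB       # at least 1 GB per constituent
    return max(recommended, minimum)

if __name__ == "__main__":
    print(recommended_flexcache_bytes(origin_bytes=2 * 1024**4, constituents=4))  # 2 TiB origin
```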
    -

    fcp_total_data

    -

    Amount of FCP traffic to and from the storage system

    +

    headroom_aggr_current_latency

    +

    This is the storage aggregate average latency per message at the disk level.

    @@ -30258,20 +34228,20 @@

    fcp_total_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/headroom_aggregatecurrent_latency
    Unit: microsec
    Type: average
    Base: current_ops
    conf/restperf/9.12.0/resource_headroom_aggr.yaml
    ZAPIperf-object-get-instances fcp_porttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances resource_headroom_aggrcurrent_latency
    Unit: microsec
    Type: average
    Base: current_ops
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml
    -

    fcp_total_ops

    -

    Total number of FCP operations

    +

    headroom_aggr_current_ops

    +

    Total number of I/Os processed by the aggregate per second.

    @@ -30284,20 +34254,20 @@

    fcp_total_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcptotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/headroom_aggregatecurrent_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/resource_headroom_aggr.yaml
    ZAPIperf-object-get-instances fcp_porttotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances resource_headroom_aggrcurrent_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml
    -

    fcp_write_data

    -

    Amount of data written to the storage system

    +

    headroom_aggr_current_utilization

    +

    This is the storage aggregate average utilization of all the data disks in the aggregate.

    @@ -30310,20 +34280,20 @@

    fcp_write_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcp.yamlapi/cluster/counter/tables/headroom_aggregatecurrent_utilization
    Unit: percent
    Type: percent
    Base: current_utilization_denominator
    conf/restperf/9.12.0/resource_headroom_aggr.yaml
    ZAPIperf-object-get-instances fcp_portwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcp.yamlperf-object-get-instances resource_headroom_aggrcurrent_utilization
    Unit: percent
    Type: percent
    Base: current_utilization_total
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml
- fcp_write_ops: Number of write operations
+ headroom_aggr_ewma_daily: Daily exponential weighted moving average.

@@ -30336,20 +34306,20 @@ fcp_write_ops
- REST | api/cluster/counter/tables/fcp | write_ops | Unit: per_sec, Type: rate | conf/restperf/9.12.0/fcp.yaml
+ REST | api/cluster/counter/tables/headroom_aggregate | ewma.daily | Unit: none, Type: raw | conf/restperf/9.12.0/resource_headroom_aggr.yaml
- ZAPI | perf-object-get-instances fcp_port | write_ops | Unit: per_sec, Type: rate | conf/zapiperf/cdot/9.8.0/fcp.yaml
+ ZAPI | perf-object-get-instances resource_headroom_aggr | ewma_daily | Unit: none, Type: raw | conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml
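The headroom_aggr_ewma_* counters are exponentially weighted moving averages that ONTAP maintains at daily, hourly, monthly, and weekly horizons; the collector only reads the raw values. For intuition only, a generic EWMA recurrence with an arbitrary smoothing factor (not ONTAP's internal weighting):

```python
# Minimal sketch (assumption): the general EWMA recurrence, illustrative only.
def ewma(samples, alpha: float = 0.2) -> float:
    value = samples[0]
    for s in samples[1:]:
        value = alpha * s + (1 - alpha) * value
    return value

if __name__ == "__main__":
    print(round(ewma([100, 120, 90, 130, 110]), 2))
```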
    -

    fcvi_firmware_invalid_crc_count

    -

    Firmware reported invalid CRC count

    +

    headroom_aggr_ewma_hourly

    +

    Hourly exponential weighted moving average.

    @@ -30362,20 +34332,20 @@

    fcvi_firmware_invalid_crc_count

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcvifirmware.invalid_crc_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcvi.yamlapi/cluster/counter/tables/headroom_aggregateewma.hourly
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/resource_headroom_aggr.yaml
    ZAPIperf-object-get-instances fcvifw_invalid_crc
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcvi.yamlperf-object-get-instances resource_headroom_aggrewma_hourly
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml
    -

    fcvi_firmware_invalid_transmit_word_count

    -

    Firmware reported invalid transmit word count

    +

    headroom_aggr_ewma_monthly

    +

    Monthly exponential weighted moving average.

    @@ -30388,20 +34358,20 @@

    fcvi_firmware_invalid_transmi

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcvifirmware.invalid_transmit_word_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcvi.yamlapi/cluster/counter/tables/headroom_aggregateewma.monthly
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/resource_headroom_aggr.yaml
    ZAPIperf-object-get-instances fcvifw_invalid_xmit_words
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcvi.yamlperf-object-get-instances resource_headroom_aggrewma_monthly
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml
    -

    fcvi_firmware_link_failure_count

    -

    Firmware reported link failure count

    +

    headroom_aggr_ewma_weekly

    +

    Weekly exponential weighted moving average.

    @@ -30414,20 +34384,20 @@ - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcvifirmware.link_failure_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcvi.yamlapi/cluster/counter/tables/headroom_aggregateewma.weekly
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/resource_headroom_aggr.yaml
    ZAPIperf-object-get-instances fcvifw_link_failure
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcvi.yamlperf-object-get-instances resource_headroom_aggrewma_weekly
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml
    -

    fcvi_firmware_loss_of_signal_count

    -

    Firmware reported loss of signal count

    +

    headroom_aggr_optimal_point_confidence_factor

    +

    The confidence factor for the optimal point value based on the observed resource latency and utilization.

    @@ -30440,20 +34410,20 @@

    fcvi_firmware_loss_of_signal_count

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcvifirmware.loss_of_signal_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcvi.yamlapi/cluster/counter/tables/headroom_aggregateoptimal_point.confidence_factor
    Unit: none
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_aggr.yaml
    ZAPIperf-object-get-instances fcvifw_loss_of_signal
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcvi.yamlperf-object-get-instances resource_headroom_aggroptimal_point_confidence_factor
    Unit: none
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml
    -

    fcvi_firmware_loss_of_sync_count

    -

    Firmware reported loss of sync count

    +

    headroom_aggr_optimal_point_latency

    +

    The latency component of the optimal point of the latency/utilization curve.

    @@ -30466,20 +34436,20 @@

    fcvi_firmware_loss_of_sync_count

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcvifirmware.loss_of_sync_count
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcvi.yamlapi/cluster/counter/tables/headroom_aggregateoptimal_point.latency
    Unit: microsec
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_aggr.yaml
    ZAPIperf-object-get-instances fcvifw_loss_of_sync
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcvi.yamlperf-object-get-instances resource_headroom_aggroptimal_point_latency
    Unit: microsec
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml
    -

    fcvi_firmware_systat_discard_frames

    -

    Firmware reported SyStatDiscardFrames value

    +

    headroom_aggr_optimal_point_ops

    +

    The ops component of the optimal point derived from the latency/utilization curve.

    @@ -30492,20 +34462,20 @@

    fcvi_firmware_systat_discard_frames

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/fcvifirmware.systat.discard_frames
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/fcvi.yamlapi/cluster/counter/tables/headroom_aggregateoptimal_point.ops
    Unit: per_sec
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_aggr.yaml
    ZAPIperf-object-get-instances fcvifw_SyStatDiscardFrames
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcvi.yamlperf-object-get-instances resource_headroom_aggroptimal_point_ops
    Unit: per_sec
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml
    -

    fcvi_hard_reset_count

    -

    Number of times hard reset of FCVI adapter got issued.

    +

    headroom_aggr_optimal_point_utilization

    +

    The utilization component of the optimal point of the latency/utilization curve.

    @@ -30518,20 +34488,20 @@

    fcvi_hard_reset_countUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcvi.yamlapi/cluster/counter/tables/headroom_aggregateoptimal_point.utilization
    Unit: none
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_aggr.yaml
    ZAPIperf-object-get-instances fcvihard_reset_cnt
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcvi.yamlperf-object-get-instances resource_headroom_aggroptimal_point_utilization
    Unit: none
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml
    -

    fcvi_rdma_write_avg_latency

    -

    Average RDMA write I/O latency.

    +

    headroom_cpu_current_latency

    +

    Current operation latency of the resource.

    @@ -30544,20 +34514,20 @@

    fcvi_rdma_write_avg_latencyUnit: microsec
    Type: average
    Base: rdma.write_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/fcvi.yamlapi/cluster/counter/tables/headroom_cpucurrent_latency
    Unit: microsec
    Type: average
    Base: current_ops
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances fcvirdma_write_avg_latency
    Unit: microsec
    Type: average
    Base: rdma_write_ops
    conf/zapiperf/cdot/9.8.0/fcvi.yamlperf-object-get-instances resource_headroom_cpucurrent_latency
    Unit: microsec
    Type: average
    Base: current_ops
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml
    -

    fcvi_rdma_write_ops

    -

    Number of RDMA write I/Os issued per second.

    +

    headroom_cpu_current_ops

    +

    Total number of operations per second (also referred to as dblade ops).

    @@ -30570,20 +34540,20 @@

    fcvi_rdma_write_opsUnit: none
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcvi.yamlapi/cluster/counter/tables/headroom_cpucurrent_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances fcvirdma_write_ops
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcvi.yamlperf-object-get-instances resource_headroom_cpucurrent_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml
    -

    fcvi_rdma_write_throughput

    -

    RDMA write throughput in bytes per second.

    +

    headroom_cpu_current_utilization

    +

    Average processor utilization across all processors in the system.

    @@ -30596,20 +34566,20 @@

    fcvi_rdma_write_throughputUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcvi.yamlapi/cluster/counter/tables/headroom_cpucurrent_utilization
    Unit: percent
    Type: percent
    Base: elapsed_time
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances fcvirdma_write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/fcvi.yamlperf-object-get-instances resource_headroom_cpucurrent_utilization
    Unit: percent
    Type: percent
    Base: current_utilization_total
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml
    -

    fcvi_soft_reset_count

    -

    Number of times soft reset of FCVI adapter got issued.

    +

    headroom_cpu_ewma_daily

    +

    Daily exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

    @@ -30622,20 +34592,20 @@

    fcvi_soft_reset_countUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/fcvi.yamlapi/cluster/counter/tables/headroom_cpuewma.daily
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances fcvisoft_reset_cnt
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/fcvi.yamlperf-object-get-instances resource_headroom_cpuewma_daily
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml
    -

    flashcache_accesses

    -

    External cache accesses per second

    +

    headroom_cpu_ewma_hourly

    +

    Hourly exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

    @@ -30648,20 +34618,20 @@

    flashcache_accessesUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/headroom_cpuewma.hourly
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances ext_cache_objaccesses
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances resource_headroom_cpuewma_hourly
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml
    -

    flashcache_disk_reads_replaced

    -

    Estimated number of disk reads per second replaced by cache

    +

    headroom_cpu_ewma_monthly

    +

    Monthly exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

    @@ -30674,20 +34644,20 @@

    flashcache_disk_reads_replacedUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/headroom_cpuewma.monthly
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances ext_cache_objdisk_reads_replaced
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances resource_headroom_cpuewma_monthly
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml
    -

    flashcache_evicts

    -

    Number of blocks evicted from the external cache to make room for new blocks

    +

    headroom_cpu_ewma_weekly

    +

    Weekly exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

    @@ -30700,20 +34670,20 @@

    flashcache_evictsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/headroom_cpuewma.weekly
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances ext_cache_objevicts
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances resource_headroom_cpuewma_weekly
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml
    -

    flashcache_hit

    -

    Number of WAFL buffers served off the external cache

    +

    headroom_cpu_optimal_point_confidence_factor

    +

    Confidence factor for the optimal point value based on the observed resource latency and utilization. The possible values are: 0 - unknown, 1 - low, 2 - medium, 3 - high. This counter can provide an average confidence factor over a range of time.

    @@ -30726,20 +34696,20 @@

    flashcache_hitUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/headroom_cpuoptimal_point.confidence_factor
    Unit: none
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances ext_cache_objhit
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances resource_headroom_cpuoptimal_point_confidence_factor
    Unit: none
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml
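
    Because optimal_point_confidence_factor is an average over optimal_point_samples, the exported value can be fractional even though the underlying states are discrete (0 - unknown, 1 - low, 2 - medium, 3 - high, per the description above). The sketch below shows one way a consumer might label such an averaged value; the rounding rule is an illustrative assumption, not Harvest or ONTAP behaviour.

# Hedged sketch: map an averaged confidence factor back to a label.
# The discrete states come from the counter description above; the
# rounding choice is an assumption made for this example only.
LABELS = {0: "unknown", 1: "low", 2: "medium", 3: "high"}

def confidence_label(avg_factor: float) -> str:
    return LABELS[min(3, max(0, round(avg_factor)))]

print(confidence_label(2.4))  # -> "medium"
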
    -

    flashcache_hit_directory

    -

    Number of directory buffers served off the external cache

    +

    headroom_cpu_optimal_point_latency

    +

    Latency component of the optimal point of the latency/utilization curve. This counter can provide an average latency over a range of time.

    @@ -30752,20 +34722,20 @@

    flashcache_hit_directory

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/external_cachehit.directory
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/headroom_cpuoptimal_point.latency
    Unit: microsec
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances ext_cache_objhit_directory
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances resource_headroom_cpuoptimal_point_latency
    Unit: microsec
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml
    -

    flashcache_hit_indirect

    -

    Number of indirect file buffers served off the external cache

    +

    headroom_cpu_optimal_point_ops

    +

    Ops component of the optimal point derived from the latency/utilization curve. This counter can provide an average ops over a range of time.

    @@ -30778,20 +34748,20 @@

    flashcache_hit_indirect

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/external_cachehit.indirect
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/headroom_cpuoptimal_point.ops
    Unit: per_sec
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances ext_cache_objhit_indirect
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances resource_headroom_cpuoptimal_point_ops
    Unit: per_sec
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml
    -

    flashcache_hit_metadata_file

    -

    Number of metadata file buffers served off the external cache

    +

    headroom_cpu_optimal_point_utilization

    +

    Utilization component of the optimal point of the latency/utilization curve. This counter can provide an average utilization over a range of time.

    @@ -30804,20 +34774,20 @@

    flashcache_hit_metadata_file

    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/headroom_cpuoptimal_point.utilization
    Unit: none
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances ext_cache_objhit_metadata_file
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances resource_headroom_cpuoptimal_point_utilization
    Unit: none
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml

    -

    flashcache_hit_normal_lev0

    -

    Number of normal level 0 WAFL buffers served off the external cache

    +

    hostadapter_bytes_read

    +

    Bytes read through a host adapter

    @@ -30830,20 +34800,20 @@

    flashcache_hit_normal_lev0Unit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/host_adapterbytes_read
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/hostadapter.yaml
    ZAPIperf-object-get-instances ext_cache_objhit_normal_lev0
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances hostadapterbytes_read
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/hostadapter.yaml
    -

    flashcache_hit_percent

    -

    External cache hit rate

    +

    hostadapter_bytes_written

    +

    Bytes written through a host adapter

    @@ -30856,20 +34826,20 @@

    flashcache_hit_percent

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/external_cachehit.percent
    Unit: percent
    Type: average
    Base: accesses
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/host_adapterbytes_written
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/hostadapter.yaml
    ZAPIperf-object-get-instances ext_cache_objhit_percent
    Unit: percent
    Type: percent
    Base: accesses
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances hostadapterbytes_written
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/hostadapter.yaml
    -

    flashcache_inserts

    -

    Number of WAFL buffers inserted into the external cache

    +

    iscsi_lif_avg_latency

    +

    Average latency for iSCSI operations

    @@ -30882,20 +34852,20 @@

    flashcache_insertsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/iscsi_lifaverage_latency
    Unit: microsec
    Type: average
    Base: cmd_transferred
    conf/restperf/9.12.0/iscsi_lif.yaml
    ZAPIperf-object-get-instances ext_cache_objinserts
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances iscsi_lifavg_latency
    Unit: microsec
    Type: average
    Base: cmd_transfered
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml
    -

    flashcache_invalidates

    -

    Number of blocks invalidated in the external cache

    +

    iscsi_lif_avg_other_latency

    +

    Average latency for operations other than read and write (for example, Inquiry, Report LUNs, SCSI Task Management Functions)

    @@ -30908,20 +34878,20 @@

    flashcache_invalidates

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/external_cacheinvalidates
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/iscsi_lifaverage_other_latency
    Unit: microsec
    Type: average
    Base: iscsi_other_ops
    conf/restperf/9.12.0/iscsi_lif.yaml
    ZAPIperf-object-get-instances ext_cache_objinvalidates
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances iscsi_lifavg_other_latency
    Unit: microsec
    Type: average
    Base: iscsi_other_ops
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml
    -

    flashcache_miss

    -

    External cache misses

    +

    iscsi_lif_avg_read_latency

    +

    Average latency for read operations

    @@ -30934,20 +34904,20 @@

    flashcache_missUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/iscsi_lifaverage_read_latency
    Unit: microsec
    Type: average
    Base: iscsi_read_ops
    conf/restperf/9.12.0/iscsi_lif.yaml
    ZAPIperf-object-get-instances ext_cache_objmiss
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances iscsi_lifavg_read_latency
    Unit: microsec
    Type: average
    Base: iscsi_read_ops
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml
    -

    flashcache_miss_directory

    -

    External cache misses accessing directory buffers

    +

    iscsi_lif_avg_write_latency

    +

    Average latency for write operations

    @@ -30960,20 +34930,20 @@

    flashcache_miss_directoryUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/iscsi_lifaverage_write_latency
    Unit: microsec
    Type: average
    Base: iscsi_write_ops
    conf/restperf/9.12.0/iscsi_lif.yaml
    ZAPIperf-object-get-instances ext_cache_objmiss_directory
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances iscsi_lifavg_write_latency
    Unit: microsec
    Type: average
    Base: iscsi_write_ops
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml
    -

    flashcache_miss_indirect

    -

    External cache misses accessing indirect file buffers

    +

    iscsi_lif_cmd_transfered

    +

    Command transferred by this iSCSI connection

    @@ -30986,20 +34956,20 @@

    flashcache_miss_indirect

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/external_cachemiss.indirect
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/iscsi_lifcmd_transferred
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/iscsi_lif.yaml
    ZAPIperf-object-get-instances ext_cache_objmiss_indirect
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances iscsi_lifcmd_transfered
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml
    -

    flashcache_miss_metadata_file

    -

    External cache misses accessing metadata file buffers

    +

    iscsi_lif_iscsi_other_ops

    +

    iSCSI other operations per second on this logical interface (LIF)

    @@ -31012,20 +34982,20 @@

    flashcache_miss_metadata_file

    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/iscsi_lifiscsi_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/iscsi_lif.yaml
    ZAPIperf-object-get-instances ext_cache_objmiss_metadata_file
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances iscsi_lifiscsi_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml

    -

    flashcache_miss_normal_lev0

    -

    External cache misses accessing normal level 0 buffers

    +

    iscsi_lif_iscsi_read_ops

    +

    iSCSI read operations per second on this logical interface (LIF)

    @@ -31038,20 +35008,20 @@

    flashcache_miss_normal_lev0Unit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/iscsi_lifiscsi_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/iscsi_lif.yaml
    ZAPIperf-object-get-instances ext_cache_objmiss_normal_lev0
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances iscsi_lifiscsi_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml
    -

    flashcache_usage

    -

    Percentage of blocks in external cache currently containing valid data

    +

    iscsi_lif_iscsi_write_ops

    +

    iSCSI write operations per second on this logical interface (LIF)

    @@ -31064,20 +35034,20 @@

    flashcache_usageUnit: percent
    Type: raw
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/ext_cache_obj.yamlapi/cluster/counter/tables/iscsi_lifiscsi_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/iscsi_lif.yaml
    ZAPIperf-object-get-instances ext_cache_objusage
    Unit: percent
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/ext_cache_obj.yamlperf-object-get-instances iscsi_lifiscsi_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml
    -

    flashpool_cache_stats

    -

    Automated Working-set Analyzer (AWA) per-interval pseudo cache statistics for the most recent intervals. The number of intervals defined as recent is CM_WAFL_HYAS_INT_DIS_CNT. This array is a table with fields corresponding to the enum type of hyas_cache_stat_type_t.

    +

    iscsi_lif_protocol_errors

    +

    Number of protocol errors from iSCSI sessions on this logical interface (LIF)

    @@ -31090,20 +35060,20 @@

    flashpool_cache_statsUnit: none
    Type: raw
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/wafl_hya_sizer.yamlapi/cluster/counter/tables/iscsi_lifprotocol_errors
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/iscsi_lif.yaml
    ZAPIperf-object-get-instances wafl_hya_sizercache_stats
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_sizer.yamlperf-object-get-instances iscsi_lifprotocol_errors
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml
    -

    flashpool_evict_destage_rate

    -

    Number of block destage per second.

    +

    iscsi_lif_read_data

    +

    Amount of data read from the storage system in bytes

    @@ -31116,20 +35086,20 @@

    flashpool_evict_destage_rateUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/iscsi_lifread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/iscsi_lif.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrevict_destage_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances iscsi_lifread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml
    -

    flashpool_evict_remove_rate

    -

    Number of block free per second.

    +

    iscsi_lif_write_data

    +

    Amount of data written to the storage system in bytes

    @@ -31142,20 +35112,20 @@

    flashpool_evict_remove_rateUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/iscsi_lifwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/iscsi_lif.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrevict_remove_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances iscsi_lifwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml
    -

    flashpool_hya_read_hit_latency_average

    -

    Average of RAID I/O latency on read hit.

    +

    iw_avg_latency

    +

    Average RDMA I/O latency.

    @@ -31168,20 +35138,20 @@

    flashpool_hya_read_hit_latency_a

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/wafl_hya_per_aggregatehya_read_hit_latency_average
    Unit: millisec
    Type: average
    Base: hya_read_hit_latency_count
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/iwarpaverage_latency
    Unit: microsec
    Type: average
    Base: ops
    conf/restperf/9.14.1/iwarp.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrhya_read_hit_latency_average
    Unit: millisec
    Type: average
    Base: hya_read_hit_latency_count
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances iwarpiw_avg_latency
    Unit: microsec
    Type: average
    Base: iw_ops
    conf/zapiperf/cdot/9.8.0/iwarp.yaml
    -

    flashpool_hya_read_miss_latency_average

    -

    Average read miss latency.

    +

    iw_ops

    +

    Number of RDMA I/Os issued.

    @@ -31194,20 +35164,20 @@

    flashpool_hya_read_miss_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/wafl_hya_per_aggregatehya_read_miss_latency_average
    Unit: millisec
    Type: average
    Base: hya_read_miss_latency_count
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/iwarpops
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.14.1/iwarp.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrhya_read_miss_latency_average
    Unit: millisec
    Type: average
    Base: hya_read_miss_latency_count
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances iwarpiw_ops
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/iwarp.yaml
    -

    flashpool_hya_write_hdd_latency_average

    -

    Average write latency to HDD.

    +

    iw_read_ops

    +

    Number of RDMA read I/Os issued.

    @@ -31220,20 +35190,20 @@

    flashpool_hya_write_hdd_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/wafl_hya_per_aggregatehya_write_hdd_latency_average
    Unit: millisec
    Type: average
    Base: hya_write_hdd_latency_count
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/iwarpread_ops
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.14.1/iwarp.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrhya_write_hdd_latency_average
    Unit: millisec
    Type: average
    Base: hya_write_hdd_latency_count
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances iwarpiw_read_ops
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/iwarp.yaml
    -

    flashpool_hya_write_ssd_latency_average

    -

    Average of RAID I/O latency on write to SSD.

    +

    iw_write_ops

    +

    Number of RDMA write I/Os issued.

    @@ -31246,20 +35216,20 @@

    flashpool_hya_write_ssd_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/wafl_hya_per_aggregatehya_write_ssd_latency_average
    Unit: millisec
    Type: average
    Base: hya_write_ssd_latency_count
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/iwarpwrite_ops
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.14.1/iwarp.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrhya_write_ssd_latency_average
    Unit: millisec
    Type: average
    Base: hya_write_ssd_latency_count
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances iwarpiw_write_ops
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/iwarp.yaml
    -

    flashpool_read_cache_ins_rate

    -

    Cache insert rate blocks/sec.

    +

    lif_recv_data

    +

    Number of bytes received per second

    @@ -31272,20 +35242,20 @@

    flashpool_read_cache_ins_rateUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/lifreceived_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lif.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrread_cache_ins_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances lifrecv_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lif.yaml
    -

    flashpool_read_ops_replaced

    -

    Number of HDD read operations replaced by SSD reads per second.

    +

    lif_recv_errors

    +

    Number of received errors per second

    @@ -31298,20 +35268,20 @@

    flashpool_read_ops_replacedUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/lifreceived_errors
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lif.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrread_ops_replaced
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances lifrecv_errors
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lif.yaml
    -

    flashpool_read_ops_replaced_percent

    -

    Percentage of HDD read operations replaced by SSD.

    +

    lif_recv_packet

    +

    Number of packets received per second

    @@ -31324,20 +35294,20 @@

    flashpool_read_ops_replaced_percent

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/wafl_hya_per_aggregateread_ops_replaced_percent
    Unit: percent
    Type: percent
    Base: read_ops_total
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/lifreceived_packets
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lif.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrread_ops_replaced_percent
    Unit: percent
    Type: percent
    Base: read_ops_total
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances lifrecv_packet
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lif.yaml
    -

    flashpool_ssd_available

    -

    Total SSD blocks available.

    +

    lif_sent_data

    +

    Number of bytes sent per second

    @@ -31350,20 +35320,20 @@

    flashpool_ssd_available

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/wafl_hya_per_aggregatessd_available
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/lifsent_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lif.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrssd_available
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances lifsent_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lif.yaml
    -

    flashpool_ssd_read_cached

    -

    Total read cached SSD blocks.

    +

    lif_sent_errors

    +

    Number of sent errors per second

    @@ -31376,20 +35346,20 @@

    flashpool_ssd_read_cachedUnit: none
    Type: raw
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/lifsent_errors
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lif.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrssd_read_cached
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances lifsent_errors
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lif.yaml
    -

    flashpool_ssd_total

    -

    Total SSD blocks.

    +

    lif_sent_packet

    +

    Number of packets sent per second

    @@ -31402,20 +35372,20 @@

    flashpool_ssd_totalUnit: none
    Type: raw
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/lifsent_packets
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lif.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrssd_total
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances lifsent_packet
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lif.yaml
    -

    flashpool_ssd_total_used

    -

    Total SSD blocks used.

    +

    lun_avg_read_latency

    +

    Average read latency in microseconds for all operations on the LUN

    @@ -31428,20 +35398,20 @@

    flashpool_ssd_total_used

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/wafl_hya_per_aggregatessd_total_used
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/lunaverage_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrssd_total_used
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances lunavg_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flashpool_ssd_write_cached

    -

    Total write cached SSD blocks.

    +

    lun_avg_write_latency

    +

    Average write latency in microseconds for all operations on the LUN

    @@ -31454,20 +35424,20 @@

    flashpool_ssd_write_cachedUnit: none
    Type: raw
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/lunaverage_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrssd_write_cached
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances lunavg_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flashpool_wc_write_blks_total

    -

    Number of write-cache blocks written per second.

    +

    lun_avg_xcopy_latency

    +

    Average latency in microseconds for xcopy requests

    @@ -31480,20 +35450,20 @@

    flashpool_wc_write_blks_totalUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/lunaverage_xcopy_latency
    Unit: microsec
    Type: average
    Base: xcopy_requests
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrwc_write_blks_total
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances lunavg_xcopy_latency
    Unit: microsec
    Type: average
    Base: xcopy_reqs
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flashpool_write_blks_replaced

    -

    Number of HDD write blocks replaced by SSD writes per second.

    +

    lun_caw_reqs

    +

    Number of compare and write requests

    @@ -31506,20 +35476,20 @@

    flashpool_write_blks_replacedUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/luncaw_requests
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrwrite_blks_replaced
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances luncaw_reqs
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flashpool_write_blks_replaced_percent

    -

    Percentage of blocks overwritten to write-cache among all disk writes.

    +

    lun_enospc

    +

    Number of operations receiving ENOSPC errors

    @@ -31532,20 +35502,20 @@

    flashpool_write_blks_replaced_per

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/wafl_hya_per_aggregatewrite_blocks_replaced_percent
    Unit: percent
    Type: average
    Base: estimated_write_blocks_total
    conf/restperf/9.12.0/wafl_hya_per_aggr.yamlapi/cluster/counter/tables/lunenospc
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances wafl_hya_per_aggrwrite_blks_replaced_percent
    Unit: percent
    Type: average
    Base: est_write_blks_total
    conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yamlperf-object-get-instances lunenospc
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flexcache_blocks_requested_from_client

    -

    Total number of blocks requested from client

    +

    lun_queue_full

    +

    Queue full responses

    @@ -31557,15 +35527,21 @@

    flexcache_blocks_requested_from_

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/lunqueue_full
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumeblocks_requested_from_client
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamlperf-object-get-instances lunqueue_full
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flexcache_blocks_retrieved_from_origin

    -

    Total number of blocks retrieved from origin

    +

    lun_read_align_histo

    +

    Histogram of WAFL read alignment (number of sectors off WAFL block start)

    @@ -31577,15 +35553,21 @@

    flexcache_blocks_retrieved_from_

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/lunread_align_histogram
    Unit: percent
    Type: percent
    Base: read_ops_sent
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumeblocks_retrieved_from_origin
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamlperf-object-get-instances lunread_align_histo
    Unit: percent
    Type: percent
    Base: read_ops_sent
    conf/zapiperf/cdot/9.8.0/lun.yaml
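
    lun_read_align_histo (and the lun_write_align_histo counter later on this page) is a percent-type counter with base read_ops_sent, so each histogram bucket is the share of reads whose starting offset fell that many sectors past a WAFL block boundary. A minimal sketch of the bucketing idea, assuming 4096-byte WAFL blocks and 512-byte sectors; those sizes are conventional assumptions, not taken from this page.

# Hedged sketch: which alignment bucket a read offset would fall into,
# assuming 4096-byte WAFL blocks and 512-byte sectors (assumed sizes).
WAFL_BLOCK = 4096
SECTOR = 512

def alignment_bucket(read_offset_bytes: int) -> int:
    # 0 means block-aligned; 1..7 means that many sectors off the block start
    return (read_offset_bytes % WAFL_BLOCK) // SECTOR

print(alignment_bucket(8192))  # -> 0 (aligned)
print(alignment_bucket(9728))  # -> 3 (three sectors off)
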
    -

    flexcache_evict_rw_cache_skipped_reason_disconnected

    -

    Total number of read-write cache evict operations skipped because the cache is disconnected.

    +

    lun_read_data

    +

    Read bytes

    @@ -31597,15 +35579,21 @@

    flexcache_evict_rw

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/lunread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumeevict_rw_cache_skipped_reason_disconnected
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamlperf-object-get-instances lunread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flexcache_evict_skipped_reason_config_noent

    -

    Total number of evict operations skipped because the cache config is not available.

    +

    lun_read_ops

    +

    Number of read operations

    @@ -31617,15 +35605,21 @@

    flexcache_evict_skipped_rea

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/lunread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumeevict_skipped_reason_config_noent
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamlperf-object-get-instances lunread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flexcache_evict_skipped_reason_disconnected

    -

    Total number of evict operations skipped because the cache is disconnected.

    +

    lun_read_partial_blocks

    +

    Percentage of reads whose size is not a multiple of WAFL block size

    @@ -31637,15 +35631,21 @@

    flexcache_evict_skipped_rea

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/lunread_partial_blocks
    Unit: percent
    Type: percent
    Base: read_ops
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumeevict_skipped_reason_disconnected
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamlperf-object-get-instances lunread_partial_blocks
    Unit: percent
    Type: percent
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flexcache_evict_skipped_reason_offline

    -

    Total number of evict operations skipped because the cache volume is offline.

    +

    lun_remote_bytes

    +

    I/O to or from a LUN which is not owned by the storage system handling the I/O.

    @@ -31657,15 +35657,21 @@

    flexcache_evict_skipped_reason_o

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/lunremote_bytes
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumeevict_skipped_reason_offline
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamlperf-object-get-instances lunremote_bytes
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flexcache_invalidate_skipped_reason_config_noent

    -

    Total number of invalidate operations skipped because the cache config is not available.

    +

    lun_remote_ops

    +

    Number of operations received by a storage system that does not own the LUN targeted by the operations.

    @@ -31677,15 +35683,21 @@

    flexcache_invalidate_s

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/lunremote_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumeinvalidate_skipped_reason_config_noent
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamlperf-object-get-instances lunremote_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flexcache_invalidate_skipped_reason_disconnected

    -

    Total number of invalidate operations skipped because the cache is disconnected.

    +

    lun_size

    +

    The total provisioned size of the LUN. The LUN size can be increased, but not reduced, using the REST interface.
    The maximum and minimum sizes listed here are the absolute maximum and absolute minimum sizes in bytes. The actual minimum and maximum sizes vary depending on the ONTAP version, ONTAP platform, and the available space in the containing volume and aggregate.
    For more information, see Size properties in the docs section of the ONTAP REST API documentation.

    @@ -31697,15 +35709,21 @@

    flexcache_invalidate_s

    + + + + + + - - - + + +
    RESTapi/storage/lunsspace.sizeconf/rest/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumeinvalidate_skipped_reason_disconnected
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamllun-get-iterlun-info.sizeconf/zapi/cdot/9.8.0/lun.yaml
    -

    flexcache_invalidate_skipped_reason_offline

    -

    Total number of invalidate operations skipped because the cache volume is offline.

    +

    lun_size_used

    +

    The amount of space consumed by the main data stream of the LUN.
    This value is the total space consumed in the volume by the LUN, including filesystem overhead, but excluding prefix and suffix streams. Due to internal filesystem overhead and the many ways SAN filesystems and applications utilize blocks within a LUN, this value does not necessarily reflect actual consumption/availability from the perspective of the filesystem or application. Without specific knowledge of how the LUN blocks are utilized outside of ONTAP, this property should not be used as an indicator for an out-of-space condition.
    For more information, see Size properties in the docs section of the ONTAP REST API documentation.

    @@ -31717,15 +35735,21 @@

    flexcache_invalidate_skippe

    + + + + + + - - - + + +
    RESTapi/storage/lunsspace.usedconf/rest/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumeinvalidate_skipped_reason_offline
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamllun-get-iterlun-info.size-usedconf/zapi/cdot/9.8.0/lun.yaml
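
    Both lun_size and lun_size_used are collected from the REST endpoint api/storage/luns (fields space.size and space.used, per the rows above). A hedged sketch of fetching those fields directly with Python; the cluster address and credentials are placeholders, and the use of the fields query parameter follows standard ONTAP REST conventions rather than anything stated on this page.

# Hedged sketch: read space.size / space.used for each LUN via ONTAP REST.
# "cluster.example.com" and the credentials are placeholders.
import requests

resp = requests.get(
    "https://cluster.example.com/api/storage/luns",
    params={"fields": "space.size,space.used"},
    auth=("admin", "password"),
    verify=False,  # placeholder; use proper TLS verification in practice
)
resp.raise_for_status()
for lun in resp.json().get("records", []):
    space = lun.get("space", {})
    print(lun.get("name"), space.get("size"), space.get("used"))
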
    -

    flexcache_miss_percent

    -

    This metric represents the percentage of block requests from a client that resulted in a "miss" in the FlexCache. A "miss" occurs when the requested data is not found in the cache and has to be retrieved from the origin volume.

    +

    lun_size_used_percent

    +

    This metric represents the percentage of a LUN that is currently being used.

    @@ -31737,15 +35761,21 @@

    flexcache_miss_percent

    + + + + + + - - - + + +
    RESTapi/storage/lunssize_used, sizeconf/rest/9.12.0/lun.yaml
    ZAPIflexcache_per_volumeblocks_retrieved_from_origin, blocks_requested_from_client
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamllun-get-itersize_used, sizeconf/zapi/cdot/9.8.0/lun.yaml
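
    flexcache_miss_percent and lun_size_used_percent are both ratios derived from the base counters listed in the rows above (blocks_retrieved_from_origin over blocks_requested_from_client, and size_used over size). A minimal sketch of those two calculations, assuming the inputs are the values reported for a single poll.

# Hedged sketch: the two derived percentages described above.
def flexcache_miss_percent(blocks_retrieved_from_origin, blocks_requested_from_client):
    if blocks_requested_from_client == 0:
        return 0.0
    return 100.0 * blocks_retrieved_from_origin / blocks_requested_from_client

def lun_size_used_percent(size_used, size):
    return 100.0 * size_used / size if size else 0.0

print(flexcache_miss_percent(250, 1000))               # -> 25.0
print(lun_size_used_percent(40 * 2**30, 100 * 2**30))  # -> 40.0
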
    -

    flexcache_nix_retry_skipped_reason_initiator_retrieve

    -

    Total number of retry nix operations skipped because the initiator is a retrieve operation.

    +

    lun_unmap_reqs

    +

    Number of unmap command requests

    @@ -31757,15 +35787,21 @@

    flexcache_nix_ret

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/lununmap_requests
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumenix_retry_skipped_reason_initiator_retrieve
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamlperf-object-get-instances lununmap_reqs
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flexcache_nix_skipped_reason_config_noent

    -

    Total number of nix operations skipped because the cache config is not available.

    +

    lun_write_align_histo

    +

    Histogram of WAFL write alignment (number of sectors off WAFL block start)

    @@ -31777,15 +35813,21 @@

    flexcache_nix_skipped_reason_

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/lunwrite_align_histogram
    Unit: percent
    Type: percent
    Base: write_ops_sent
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumenix_skipped_reason_config_noent
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamlperf-object-get-instances lunwrite_align_histo
    Unit: percent
    Type: percent
    Base: write_ops_sent
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flexcache_nix_skipped_reason_disconnected

    -

    Total number of nix operations skipped because the cache is disconnected.

    +

    lun_write_data

    +

    Write bytes

    @@ -31797,15 +35839,21 @@

    flexcache_nix_skipped_reason_

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/lunwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumenix_skipped_reason_disconnected
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamlperf-object-get-instances lunwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flexcache_nix_skipped_reason_in_progress

    -

    Total nix operations skipped because of an in-progress nix.

    +

    lun_write_ops

    +

    Number of write operations

    @@ -31817,15 +35865,21 @@

    flexcache_nix_skipped_reason_i

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/lunwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumenix_skipped_reason_in_progress
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamlperf-object-get-instances lunwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flexcache_nix_skipped_reason_offline

    -

    Total number of nix operations skipped because the cache volume is offline.

    +

    lun_write_partial_blocks

    +

    Percentage of writes whose size is not a multiple of WAFL block size

    @@ -31837,15 +35891,21 @@

    flexcache_nix_skipped_reason_offli

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/lunwrite_partial_blocks
    Unit: percent
    Type: percent
    Base: write_ops
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumenix_skipped_reason_offline
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamlperf-object-get-instances lunwrite_partial_blocks
    Unit: percent
    Type: percent
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flexcache_reconciled_data_entries

    -

    Total number of reconciled data entries at cache side.

    +

    lun_writesame_reqs

    +

    Number of write same command requests

    @@ -31857,15 +35917,21 @@

    flexcache_reconciled_data_entries

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/lunwritesame_requests
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumereconciled_data_entries
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamlperf-object-get-instances lunwritesame_reqs
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flexcache_reconciled_lock_entries

    -

    Total number of reconciled lock entries at cache side.

    +

    lun_writesame_unmap_reqs

    +

    Number of write same command requests with the unmap bit set

    @@ -31877,15 +35943,21 @@

    flexcache_reconciled_lock_entries

    + + + + + + - - - + + +
    RESTapi/cluster/counter/tables/lunwritesame_unmap_requests
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances flexcache_per_volumereconciled_lock_entries
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/flexcache.yamlperf-object-get-instances lunwritesame_unmap_reqs
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    flexcache_size

    -

    Physical size of the FlexCache. The recommended size for a FlexCache is 10% of the origin volume. The minimum FlexCache constituent size is 1GB.

    +

    lun_xcopy_reqs

    +

    Total number of xcopy operations on the LUN

    @@ -31898,20 +35970,20 @@

    flexcache_sizeUnit: none
    Type: rate
    Base: +

    - - - + + +
    conf/restperf/9.12.0/lun.yaml
    ZAPIflexcache-get-iterflexcache-info.sizeconf/zapi/cdot/9.8.0/flexcache.yamlperf-object-get-instances lunxcopy_reqs
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yaml
    -

    headroom_aggr_current_latency

    -

    This is the storage aggregate average latency per message at the disk level.

    +

    metadata_collector_api_time

    +

    amount of time to collect data from monitored cluster object

    @@ -31924,20 +35996,20 @@

    headroom_aggr_current_latencyUnit: microsec
    Type: average
    Base: current_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: microseconds
    NA
    ZAPIperf-object-get-instances resource_headroom_aggrcurrent_latency
    Unit: microsec
    Type: average
    Base: current_ops
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: microseconds
    NA
    -

    headroom_aggr_current_ops

    -

    Total number of I/Os processed by the aggregate per second.

    +

    metadata_collector_calc_time

    +

    amount of time it took to compute metrics between two successive polls, specifically using properties like raw, delta, rate, average, and percent. This metric is available for ZapiPerf/RestPerf collectors.

    @@ -31950,20 +36022,20 @@

    headroom_aggr_current_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: microseconds
    NA
    ZAPIperf-object-get-instances resource_headroom_aggrcurrent_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: microseconds
    NA
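
    The metadata_collector_calc_time description above names the counter properties Harvest evaluates between two successive polls: raw, delta, rate, average, and percent. The sketch below illustrates that per-counter math, following the Unit/Type/Base pattern used throughout this page; the real implementation lives in the ZapiPerf/RestPerf collectors, so treat this only as an approximation.

# Hedged sketch: deriving a counter value from two successive polls,
# based on the Type/Base columns shown throughout this page.
def compute(counter_type, curr, prev, elapsed_sec, curr_base=None, prev_base=None):
    if counter_type == "raw":
        return curr                    # exported as-is
    delta = curr - prev
    if counter_type == "delta":
        return delta
    if counter_type == "rate":
        return delta / elapsed_sec     # e.g. per_sec counters
    base_delta = (curr_base - prev_base) if curr_base is not None else 0
    if counter_type == "average":      # e.g. a latency counter with an ops base
        return delta / base_delta if base_delta else 0
    if counter_type == "percent":      # e.g. utilization with a denominator base
        return 100.0 * delta / base_delta if base_delta else 0
    raise ValueError(counter_type)

# 60,000,000 microseconds of latency across 3,000 ops in one 60s poll window
print(compute("average", 60_000_000, 0, 60, curr_base=3_000, prev_base=0))  # -> 20000.0
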
    -

    headroom_aggr_current_utilization

    -

    This is the storage aggregate average utilization of all the data disks in the aggregate.

    +

    metadata_collector_instances

    +

    number of objects collected from monitored cluster

    @@ -31976,20 +36048,20 @@

    headroom_aggr_current_utilization

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/headroom_aggregatecurrent_utilization
    Unit: percent
    Type: percent
    Base: current_utilization_denominator
    conf/restperf/9.12.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: scalar
    NA
    ZAPIperf-object-get-instances resource_headroom_aggrcurrent_utilization
    Unit: percent
    Type: percent
    Base: current_utilization_total
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: scalar
    NA
    -

    headroom_aggr_ewma_daily

    -

    Daily exponential weighted moving average.

    +

    metadata_collector_metrics

    +

    number of counters collected from monitored cluster

    @@ -32002,20 +36074,20 @@

    headroom_aggr_ewma_daily

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/headroom_aggregateewma.daily
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: scalar
    NA
    ZAPIperf-object-get-instances resource_headroom_aggrewma_daily
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: scalar
    NA
    -

    headroom_aggr_ewma_hourly

    -

    Hourly exponential weighted moving average.

    +

    metadata_collector_parse_time

    +

    amount of time to parse XML, JSON, etc. for cluster object

    @@ -32028,20 +36100,20 @@

    headroom_aggr_ewma_hourlyUnit: none
    Type: raw
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: microseconds
    NA
    ZAPIperf-object-get-instances resource_headroom_aggrewma_hourly
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: microseconds
    NA
    -

    headroom_aggr_ewma_monthly

    -

    Monthly exponential weighted moving average.

    +

    metadata_collector_plugin_time

    +

    amount of time for all plugins to post-process metrics

    @@ -32054,20 +36126,20 @@

    headroom_aggr_ewma_monthlyUnit: none
    Type: raw
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: microseconds
    NA
    ZAPIperf-object-get-instances resource_headroom_aggrewma_monthly
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: microseconds
    NA
    -

    headroom_aggr_ewma_weekly

    -

    Weekly exponential weighted moving average.

    +

    metadata_collector_poll_time

    +

    amount of time it took for the poll to finish

    @@ -32080,20 +36152,20 @@

    headroom_aggr_ewma_weeklyUnit: none
    Type: raw
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: microseconds
    NA
    ZAPIperf-object-get-instances resource_headroom_aggrewma_weekly
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: microseconds
    NA
    -

    headroom_aggr_optimal_point_confidence_factor

    -

    The confidence factor for the optimal point value based on the observed resource latency and utilization.

    +

    metadata_collector_skips

    +

    number of metrics that were not calculated between two successive polls. This metric is available for ZapiPerf/RestPerf collectors.

    @@ -32106,20 +36178,20 @@

    headroom_aggr_optimal_poi

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/headroom_aggregateoptimal_point.confidence_factor
    Unit: none
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: scalar
    NA
    ZAPIperf-object-get-instances resource_headroom_aggroptimal_point_confidence_factor
    Unit: none
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: scalar
    NA
    -

    headroom_aggr_optimal_point_latency

    -

    The latency component of the optimal point of the latency/utilization curve.

    +

    metadata_collector_task_time

    +

    amount of time it took for each collector's subtasks to complete

    @@ -32132,20 +36204,20 @@

    headroom_aggr_optimal_point_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/headroom_aggregateoptimal_point.latency
    Unit: microsec
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: microseconds
    NA
    ZAPIperf-object-get-instances resource_headroom_aggroptimal_point_latency
    Unit: microsec
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: microseconds
    NA
    -

    headroom_aggr_optimal_point_ops

    -

    The ops component of the optimal point derived from the latency/utilization curve.

    +

    metadata_component_count

    +

    number of metrics collected for each object

    @@ -32158,20 +36230,20 @@

    headroom_aggr_optimal_point_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/headroom_aggregateoptimal_point.ops
    Unit: per_sec
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: scalar
    NA
    ZAPIperf-object-get-instances resource_headroom_aggroptimal_point_ops
    Unit: per_sec
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: scalar
    NA
    -

    headroom_aggr_optimal_point_utilization

    -

    The utilization component of the optimal point of the latency/utilization curve.

    +

    metadata_component_status

    +

    status of the collector - 0 means running, 1 means standby, 2 means failed

    @@ -32184,20 +36256,20 @@

    headroom_aggr_optimal_point_uti

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/headroom_aggregateoptimal_point.utilization
    Unit: none
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: enum
    NA
    ZAPIperf-object-get-instances resource_headroom_aggroptimal_point_utilization
    Unit: none
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yamlNAHarvest generated
    Unit: enum
    NA
    -

    headroom_cpu_current_latency

    -

    Current operation latency of the resource.

    +

    metadata_exporter_count

    +

    number of metrics and labels exported

    @@ -32210,20 +36282,20 @@

    headroom_cpu_current_latencyUnit: microsec
    Type: average
    Base: current_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/resource_headroom_cpu.yamlNAHarvest generated
    Unit: scalar
    NA
    ZAPIperf-object-get-instances resource_headroom_cpucurrent_latency
    Unit: microsec
    Type: average
    Base: current_ops
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yamlNAHarvest generated
    Unit: scalar
    NA
    -

    headroom_cpu_current_ops

    -

    Total number of operations per second (also referred to as dblade ops).

    +

    metadata_exporter_time

    +

    amount of time it took to render, export, and serve exported data

    @@ -32236,20 +36308,20 @@

    headroom_cpu_current_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/headroom_cpucurrent_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/resource_headroom_cpu.yamlNAHarvest generated
    Unit: microseconds
    NA
    ZAPIperf-object-get-instances resource_headroom_cpucurrent_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yamlNAHarvest generated
    Unit: microseconds
    NA
    -

    headroom_cpu_current_utilization

    -

    Average processor utilization across all processors in the system.

    +

    metadata_target_goroutines

    +

    number of goroutines that exist within the poller

    @@ -32262,20 +36334,20 @@

    headroom_cpu_current_utilization

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/headroom_cpucurrent_utilization
    Unit: percent
    Type: percent
    Base: elapsed_time
    conf/restperf/9.12.0/resource_headroom_cpu.yamlNAHarvest generated
    Unit: scalar
    NA
    ZAPIperf-object-get-instances resource_headroom_cpucurrent_utilization
    Unit: percent
    Type: percent
    Base: current_utilization_total
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yamlNAHarvest generated
    Unit: scalar
    NA
    -

    headroom_cpu_ewma_daily

    -

    Daily exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

    +

    metadata_target_status

    +

    status of the system being monitored. 0 means reachable, 1 means unreachable

    @@ -32288,20 +36360,20 @@

    headroom_cpu_ewma_daily

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/headroom_cpuewma.daily
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/resource_headroom_cpu.yamlNAHarvest generated
    Unit: enum
    NA
    ZAPIperf-object-get-instances resource_headroom_cpuewma_daily
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yamlNAHarvest generated
    Unit: enum
    NA
    -

    headroom_cpu_ewma_hourly

    -

    Hourly exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

    +

    metrocluster_check_aggr_status

    +

    Detail of the type of diagnostic operation run for the Aggregate with diagnostic operation result.

    @@ -32314,20 +36386,14 @@

    headroom_cpu_ewma_hourly

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/headroom_cpuewma.hourly
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances resource_headroom_cpuewma_hourly
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yamlNAHarvest generatedconf/rest/9.12.0/metrocluster_check.yaml
    -

    headroom_cpu_ewma_monthly

    -

    Monthly exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

    +

    metrocluster_check_cluster_status

    +

    Detail of the type of diagnostic operation run for the Cluster with diagnostic operation result.

    @@ -32340,20 +36406,14 @@

    headroom_cpu_ewma_monthlyUnit: none
    Type: raw
    Base: -

    - - - - - - + + +
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances resource_headroom_cpuewma_monthly
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yamlNAHarvest generatedconf/rest/9.12.0/metrocluster_check.yaml
    -

    headroom_cpu_ewma_weekly

    -

    Weekly exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

    +

    metrocluster_check_node_status

    +

    Detail of the type of diagnostic operation run for the Node with diagnostic operation result.

    @@ -32366,20 +36426,14 @@

    headroom_cpu_ewma_weekly

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/headroom_cpuewma.weekly
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances resource_headroom_cpuewma_weekly
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yamlNAHarvest generatedconf/rest/9.12.0/metrocluster_check.yaml
    -

    headroom_cpu_optimal_point_confidence_factor

    -

    Confidence factor for the optimal point value based on the observed resource latency and utilization. The possible values are: 0 - unknown, 1 - low, 2 - medium, 3 - high. This counter can provide an average confidence factor over a range of time.

    +

    metrocluster_check_volume_status

    +

    Detail of the type of diagnostic operation run for the Volume with diagnostic operation result.

    @@ -32392,20 +36446,14 @@

    headroom_cpu_optimal_point

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/headroom_cpuoptimal_point.confidence_factor
    Unit: none
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_cpu.yaml
    ZAPIperf-object-get-instances resource_headroom_cpuoptimal_point_confidence_factor
    Unit: none
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yamlNAHarvest generatedconf/rest/9.12.0/metrocluster_check.yaml
    -

    headroom_cpu_optimal_point_latency

    -

    Latency component of the optimal point of the latency/utilization curve. This counter can provide an average latency over a range of time.

    +

    namespace_avg_other_latency

    +

    Average other ops latency in microseconds for all operations on the Namespace

    @@ -32418,20 +36466,20 @@

    headroom_cpu_optimal_point_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/headroom_cpuoptimal_point.latency
    Unit: microsec
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_cpu.yamlapi/cluster/counter/tables/namespaceaverage_other_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/restperf/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances resource_headroom_cpuoptimal_point_latency
    Unit: microsec
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yamlperf-object-get-instances namespaceavg_other_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/zapiperf/cdot/9.10.1/namespace.yaml
    -

    headroom_cpu_optimal_point_ops

    -

    Ops component of the optimal point derived from the latency/utilization curve. This counter can provide an average ops over a range of time.

    +

    namespace_avg_read_latency

    +

    Average read latency in microseconds for all operations on the Namespace

    @@ -32444,20 +36492,20 @@

    headroom_cpu_optimal_point_opsUnit: per_sec
    Type: average
    Base: optimal_point.samples -

    + + + - - - + + +
    conf/restperf/9.12.0/resource_headroom_cpu.yamlapi/cluster/counter/tables/namespaceaverage_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/restperf/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances resource_headroom_cpuoptimal_point_ops
    Unit: per_sec
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yamlperf-object-get-instances namespaceavg_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.10.1/namespace.yaml
    -

    headroom_cpu_optimal_point_utilization

    -

    Utilization component of the optimal point of the latency/utilization curve. This counter can provide an average utilization over a range of time.

    +

    namespace_avg_write_latency

    +

    Average write latency in microseconds for all operations on the Namespace

    @@ -32470,20 +36518,20 @@

    headroom_cpu_optimal_point_utili

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/headroom_cpuoptimal_point.utilization
    Unit: none
    Type: average
    Base: optimal_point.samples
    conf/restperf/9.12.0/resource_headroom_cpu.yamlapi/cluster/counter/tables/namespaceaverage_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/restperf/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances resource_headroom_cpuoptimal_point_utilization
    Unit: none
    Type: average
    Base: optimal_point_samples
    conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yamlperf-object-get-instances namespaceavg_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.10.1/namespace.yaml
    -

    hostadapter_bytes_read

    -

    Bytes read through a host adapter

    +

    namespace_block_size

    +

    The size of blocks in the namespace in bytes.
    Valid in POST when creating an NVMe namespace that is not a clone of another. Disallowed in POST when creating a namespace clone.

    @@ -32496,20 +36544,20 @@

    hostadapter_bytes_read

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/host_adapterbytes_read
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/hostadapter.yamlapi/storage/namespacesspace.block_sizeconf/rest/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances hostadapterbytes_read
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/hostadapter.yamlnvme-namespace-get-iternvme-namespace-info.block-sizeconf/zapi/cdot/9.8.0/namespace.yaml
    -

    hostadapter_bytes_written

    -

    Bytes written through a host adapter

    +

    namespace_other_ops

    +

    Number of other operations

    @@ -32522,20 +36570,20 @@

    hostadapter_bytes_writtenUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/hostadapter.yamlapi/cluster/counter/tables/namespaceother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances hostadapterbytes_written
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/hostadapter.yamlperf-object-get-instances namespaceother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/namespace.yaml
    -

    iscsi_lif_avg_latency

    -

    Average latency for iSCSI operations

    +

    namespace_read_data

    +

    Read bytes

    @@ -32548,20 +36596,20 @@

    iscsi_lif_avg_latencyUnit: microsec
    Type: average
    Base: cmd_transferred -

    + + + - - - + + +
    conf/restperf/9.12.0/iscsi_lif.yamlapi/cluster/counter/tables/namespaceread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances iscsi_lifavg_latency
    Unit: microsec
    Type: average
    Base: cmd_transfered
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yamlperf-object-get-instances namespaceread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/namespace.yaml
    -

    iscsi_lif_avg_other_latency

    -

    Average latency for operations other than read and write (for example, Inquiry, Report LUNs, SCSI Task Management Functions)

    +

    namespace_read_ops

    +

    Number of read operations

    @@ -32574,20 +36622,20 @@

    iscsi_lif_avg_other_latencyUnit: microsec
    Type: average
    Base: iscsi_other_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/iscsi_lif.yamlapi/cluster/counter/tables/namespaceread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances iscsi_lifavg_other_latency
    Unit: microsec
    Type: average
    Base: iscsi_other_ops
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yamlperf-object-get-instances namespaceread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/namespace.yaml
    -

    iscsi_lif_avg_read_latency

    -

    Average latency for read operations

    +

    namespace_remote_bytes

    +

    Remote read bytes

    @@ -32600,20 +36648,20 @@

    iscsi_lif_avg_read_latencyUnit: microsec
    Type: average
    Base: iscsi_read_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/iscsi_lif.yamlapi/cluster/counter/tables/namespaceremote.read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances iscsi_lifavg_read_latency
    Unit: microsec
    Type: average
    Base: iscsi_read_ops
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yamlperf-object-get-instances namespaceremote_bytes
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.10.1/namespace.yaml
    -

    iscsi_lif_avg_write_latency

    -

    Average latency for write operations

    +

    namespace_remote_ops

    +

    Number of remote read operations

    @@ -32626,20 +36674,20 @@

    iscsi_lif_avg_write_latencyUnit: microsec
    Type: average
    Base: iscsi_write_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/iscsi_lif.yamlapi/cluster/counter/tables/namespaceremote.read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances iscsi_lifavg_write_latency
    Unit: microsec
    Type: average
    Base: iscsi_write_ops
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yamlperf-object-get-instances namespaceremote_ops
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.10.1/namespace.yaml
    -

    iscsi_lif_cmd_transfered

    -

    Command transferred by this iSCSI connection

    +

    namespace_size

    +

    The total provisioned size of the NVMe namespace. Valid in POST and PATCH. The NVMe namespace size can be increased but not be made smaller using the REST interface.
    The maximum and minimum sizes listed here are the absolute maximum and absolute minimum sizes in bytes. The maximum size is variable with respect to large NVMe namespace support in ONTAP. If large namespaces are supported, the maximum size is 128 TB (140737488355328 bytes) and if not supported, the maximum size is just under 16 TB (17557557870592 bytes). The minimum size supported is always 4096 bytes.
    For more information, see Size properties in the docs section of the ONTAP REST API documentation.
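    As a quick arithmetic check of the limits quoted above (illustration only), the figures are binary multiples:

    # Sanity-check the byte figures in the namespace_size description.
    TiB = 2 ** 40
    assert 128 * TiB == 140_737_488_355_328   # maximum with large-namespace support
    assert 17_557_557_870_592 < 16 * TiB      # "just under 16 TB" without it
    assert 4096 == 4 * 1024                   # minimum namespace size is one 4 KiB block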

    @@ -32652,20 +36700,20 @@

    iscsi_lif_cmd_transfered

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/iscsi_lifcmd_transferred
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/iscsi_lif.yamlapi/storage/namespacesspace.sizeconf/rest/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances iscsi_lifcmd_transfered
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yamlnvme-namespace-get-iternvme-namespace-info.sizeconf/zapi/cdot/9.8.0/namespace.yaml
    -

    iscsi_lif_iscsi_other_ops

    -

    iSCSI other operations per second on this logical interface (LIF)

    +

    namespace_size_available

    +

    This metric represents the amount of available space in a namespace.

    @@ -32678,20 +36726,20 @@

    iscsi_lif_iscsi_other_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/iscsi_lif.yamlapi/storage/namespacessize, size_usedconf/rest/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances iscsi_lifiscsi_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yamlnvme-namespace-get-itersize, size_usedconf/zapi/cdot/9.8.0/namespace.yaml
    -

    iscsi_lif_iscsi_read_ops

    -

    iSCSI read operations per second on this logical interface (LIF)

    +

    namespace_size_available_percent

    +

    This metric represents the percentage of available space in a namespace.
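    Based on the source counters listed for these metrics (size, size_used, and size_available), the derivation is presumably the arithmetic below; this is a sketch with hypothetical values, not Harvest's code.

    # How the derived namespace space metrics relate (hypothetical values).
    size = 10 * 1024 ** 3        # provisioned namespace size in bytes (10 GiB)
    size_used = 4 * 1024 ** 3    # bytes consumed

    size_available = size - size_used                        # namespace_size_available
    size_available_percent = 100.0 * size_available / size   # namespace_size_available_percent

    print(size_available, size_available_percent)  # 6442450944 60.0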

    @@ -32704,20 +36752,20 @@

    iscsi_lif_iscsi_read_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/iscsi_lifiscsi_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/iscsi_lif.yamlapi/storage/namespacessize_available, sizeconf/rest/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances iscsi_lifiscsi_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yamlnvme-namespace-get-itersize_available, sizeconf/zapi/cdot/9.8.0/namespace.yaml
    -

    iscsi_lif_iscsi_write_ops

    -

    iSCSI write operations per second on this logical interface (LIF)

    +

    namespace_size_used

    +

    The amount of space consumed by the main data stream of the NVMe namespace.
    This value is the total space consumed in the volume by the NVMe namespace, including filesystem overhead, but excluding prefix and suffix streams. Due to internal filesystem overhead and the many ways NVMe filesystems and applications utilize blocks within a namespace, this value does not necessarily reflect actual consumption/availability from the perspective of the filesystem or application. Without specific knowledge of how the namespace blocks are utilized outside of ONTAP, this property should not be used as an indicator for an out-of-space condition.
    For more information, see Size properties in the docs section of the ONTAP REST API documentation.

    @@ -32730,20 +36778,20 @@

    iscsi_lif_iscsi_write_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/iscsi_lif.yamlapi/storage/namespacesspace.usedconf/rest/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances iscsi_lifiscsi_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yamlnvme-namespace-get-iternvme-namespace-info.size-usedconf/zapi/cdot/9.8.0/namespace.yaml
    -

    iscsi_lif_protocol_errors

    -

    Number of protocol errors from iSCSI sessions on this logical interface (LIF)

    +

    namespace_write_data

    +

    Write bytes

    @@ -32756,20 +36804,20 @@

    iscsi_lif_protocol_errorsUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/iscsi_lif.yamlapi/cluster/counter/tables/namespacewrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances iscsi_lifprotocol_errors
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yamlperf-object-get-instances namespacewrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/namespace.yaml
    -

    iscsi_lif_read_data

    -

    Amount of data read from the storage system in bytes

    +

    namespace_write_ops

    +

    Number of write operations

    @@ -32782,20 +36830,20 @@

    iscsi_lif_read_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/iscsi_lif.yamlapi/cluster/counter/tables/namespacewrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/namespace.yaml
    ZAPIperf-object-get-instances iscsi_lifread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yamlperf-object-get-instances namespacewrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/namespace.yaml
    -

    iscsi_lif_write_data

    -

    Amount of data written to the storage system in bytes

    +

    ndmp_session_data_bytes_processed

    +

    Indicates the NDMP data bytes processed.

    @@ -32808,20 +36856,14 @@

    iscsi_lif_write_dataUnit: b_per_sec
    Type: rate
    Base: -

    - - - - - - + + +
    conf/restperf/9.12.0/iscsi_lif.yaml
    ZAPIperf-object-get-instances iscsi_lifwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/iscsi_lif.yamlapi/protocols/ndmp/sessionsdata.bytes_processedconf/rest/9.7.0/ndmp_session.yaml
    -

    iw_avg_latency

    -

    Average RDMA I/O latency.

    +

    ndmp_session_mover_bytes_moved

    +

    Indicates the NDMP mover bytes moved.

    @@ -32834,20 +36876,14 @@

    iw_avg_latencyUnit: microsec
    Type: average
    Base: ops -

    - - - - - - + + +
    conf/restperf/9.14.1/iwarp.yaml
    ZAPIperf-object-get-instances iwarpiw_avg_latency
    Unit: microsec
    Type: average
    Base: iw_ops
    conf/zapiperf/cdot/9.8.0/iwarp.yamlapi/protocols/ndmp/sessionsmover.bytes_movedconf/rest/9.7.0/ndmp_session.yaml
    -

    iw_ops

    -

    Number of RDMA I/Os issued.

    +

    net_port_mtu

    +

    Maximum transmission unit, largest packet size on this network

    @@ -32860,20 +36896,20 @@

    iw_opsUnit: none
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/iwarp.yamlapi/network/ethernet/portsmtuconf/rest/9.12.0/netport.yaml
    ZAPIperf-object-get-instances iwarpiw_ops
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/iwarp.yamlnet-port-get-iternet-port-info.mtuconf/zapi/cdot/9.8.0/netport.yaml
    -

    iw_read_ops

    -

    Number of RDMA read I/Os issued.

    +

    netstat_bytes_recvd

    +

    Number of bytes received by a TCP connection

    @@ -32885,21 +36921,15 @@

    iw_read_ops

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/iwarpread_ops
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.14.1/iwarp.yaml
    ZAPIperf-object-get-instances iwarpiw_read_ops
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/iwarp.yamlperf-object-get-instances netstatbytes_recvd
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/netstat.yaml
    -

    iw_write_ops

    -

    Number of RDMA write I/Os issued.

    +

    netstat_bytes_sent

    +

    Number of bytes sent by a TCP connection

    @@ -32911,21 +36941,15 @@

    iw_write_ops

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/iwarpwrite_ops
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.14.1/iwarp.yaml
    ZAPIperf-object-get-instances iwarpiw_write_ops
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/iwarp.yamlperf-object-get-instances netstatbytes_sent
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/netstat.yaml
    -

    lif_recv_data

    -

    Number of bytes received per second

    +

    netstat_cong_win

    +

    Congestion window of a TCP connection

    @@ -32937,21 +36961,15 @@

    lif_recv_data

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/lifreceived_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lif.yaml
    ZAPIperf-object-get-instances lifrecv_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lif.yamlperf-object-get-instances netstatcong_win
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/netstat.yaml
    -

    lif_recv_errors

    -

    Number of received Errors per second

    +

    netstat_cong_win_th

    +

    Congestion window threshold of a TCP connection

    @@ -32963,21 +36981,15 @@

    lif_recv_errorsUnit: per_sec
    Type: rate
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/lif.yaml
    ZAPIperf-object-get-instances lifrecv_errors
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lif.yamlperf-object-get-instances netstatcong_win_th
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/netstat.yaml
    -

    lif_recv_packet

    -

    Number of packets received per second

    +

    netstat_ooorcv_pkts

    +

    Number of out-of-order packets received by this TCP connection

    @@ -32989,21 +37001,15 @@

    lif_recv_packetUnit: per_sec
    Type: rate
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/lif.yaml
    ZAPIperf-object-get-instances lifrecv_packet
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lif.yamlperf-object-get-instances netstatooorcv_pkts
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/netstat.yaml
    -

    lif_sent_data

    -

    Number of bytes sent per second

    +

    netstat_recv_window

    +

    Receive window size of a TCP connection

    @@ -33015,21 +37021,15 @@

    lif_sent_data

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/lifsent_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lif.yaml
    ZAPIperf-object-get-instances lifsent_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lif.yamlperf-object-get-instances netstatrecv_window
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/netstat.yaml
    -

    lif_sent_errors

    -

    Number of sent errors per second

    +

    netstat_rexmit_pkts

    +

    Number of packets retransmitted by this TCP connection

    @@ -33041,21 +37041,15 @@

    lif_sent_errorsUnit: per_sec
    Type: rate
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/lif.yaml
    ZAPIperf-object-get-instances lifsent_errors
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lif.yamlperf-object-get-instances netstatrexmit_pkts
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/netstat.yaml
    -

    lif_sent_packet

    -

    Number of packets sent per second

    +

    netstat_send_window

    +

    Send window size of a TCP connection

    @@ -33067,21 +37061,15 @@

    lif_sent_packetUnit: per_sec
    Type: rate
    Base: -

    - - - - - + + +
    conf/restperf/9.12.0/lif.yaml
    ZAPIperf-object-get-instances lifsent_packet
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lif.yamlperf-object-get-instances netstatsend_window
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/netstat.yaml
    -

    lun_avg_read_latency

    -

    Average read latency in microseconds for all operations on the LUN

    +

    nfs_clients_idle_duration

    +

    Specifies the idle time duration in ISO-8601 format, expressed in hours, minutes, and seconds.
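    If the idle duration is needed as a number, the ISO-8601 value can be parsed directly; the sketch below assumes a value shaped like "PT1H15M30S" (hour/minute/second components only) and is only an illustration.

    import re

    # Parse an ISO-8601 time duration such as "PT1H15M30S" into seconds.
    def idle_seconds(duration: str) -> float:
        m = re.fullmatch(r"PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+(?:\.\d+)?)S)?", duration)
        if not m:
            raise ValueError(f"unexpected duration: {duration!r}")
        hours, minutes, seconds = (float(g) if g else 0.0 for g in m.groups())
        return hours * 3600 + minutes * 60 + seconds

    print(idle_seconds("PT1H15M30S"))  # 4530.0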

    @@ -33094,20 +37082,14 @@

    lun_avg_read_latencyUnit: microsec
    Type: average
    Base: read_ops -

    - - - - - - + + +
    conf/restperf/9.12.0/lun.yaml
    ZAPIperf-object-get-instances lunavg_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/lun.yamlapi/protocols/nfs/connected-clientsidle_durationconf/rest/9.7.0/nfs_clients.yaml
    -

    lun_avg_write_latency

    -

    Average write latency in microseconds for all operations on the LUN

    +

    nfs_diag_storePool_ByteLockAlloc

    +

    Current number of byte range lock objects allocated.

    @@ -33120,20 +37102,20 @@

    lun_avg_write_latencyUnit: microsec
    Type: average
    Base: write_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.byte_lock_allocated
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunavg_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_ByteLockAlloc
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_avg_xcopy_latency

    -

    Average latency in microseconds for xcopy requests

    +

    nfs_diag_storePool_ByteLockMax

    +

    Maximum number of byte range lock objects.

    @@ -33146,20 +37128,20 @@

    lun_avg_xcopy_latencyUnit: microsec
    Type: average
    Base: xcopy_requests -

    + + + - - - + + +
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.byte_lock_maximum
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunavg_xcopy_latency
    Unit: microsec
    Type: average
    Base: xcopy_reqs
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_ByteLockMax
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_caw_reqs

    -

    Number of compare and write requests

    +

    nfs_diag_storePool_ClientAlloc

    +

    Current number of client objects allocated.

    @@ -33172,20 +37154,20 @@

    lun_caw_reqs

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/luncaw_requests
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.client_allocated
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances luncaw_reqs
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_ClientAlloc
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_enospc

    -

    Number of operations receiving ENOSPC errors

    +

    nfs_diag_storePool_ClientMax

    +

    Maximum number of client objects.

    @@ -33198,20 +37180,20 @@

    lun_enospcUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.client_maximum
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunenospc
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_ClientMax
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_queue_full

    -

    Queue full responses

    +

    nfs_diag_storePool_ConnectionParentSessionReferenceAlloc

    +

    Current number of connection parent session reference objects allocated.

    @@ -33224,20 +37206,20 @@

    lun_queue_fullUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.connection_parent_session_reference_allocated
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunqueue_full
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_ConnectionParentSessionReferenceAlloc
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_read_align_histo

    -

    Histogram of WAFL read alignment (number of sectors off WAFL block start)
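    To make the buckets concrete: a WAFL block is 4 KiB, i.e. eight 512-byte sectors, so the histogram bucket for an I/O is presumably the sector offset of its starting byte within that block. A hypothetical sketch:

    # Illustrative: which alignment bucket (0-7) a read's starting offset falls in.
    WAFL_BLOCK = 4096   # bytes per WAFL block
    SECTOR = 512        # bytes per sector

    def align_bucket(start_offset_bytes: int) -> int:
        # number of sectors off the start of the containing WAFL block
        return (start_offset_bytes % WAFL_BLOCK) // SECTOR

    print(align_bucket(8192), align_bucket(8704))  # 0 (aligned), 1 (one sector off)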

    +

    nfs_diag_storePool_ConnectionParentSessionReferenceMax

    +

    Maximum number of connection parent session reference objects.

    @@ -33250,20 +37232,20 @@

    lun_read_align_histoUnit: percent
    Type: percent
    Base: read_ops_sent -

    + + + - - - + + +
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.connection_parent_session_reference_maximum
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunread_align_histo
    Unit: percent
    Type: percent
    Base: read_ops_sent
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_ConnectionParentSessionReferenceMax
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_read_data

    -

    Read bytes

    +

    nfs_diag_storePool_CopyStateAlloc

    +

    Current number of copy state objects allocated.

    @@ -33276,20 +37258,20 @@

    lun_read_data

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/lunread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.copy_state_allocated
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_CopyStateAlloc
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_read_ops

    -

    Number of read operations

    +

    nfs_diag_storePool_CopyStateMax

    +

    Maximum number of copy state objects.

    @@ -33302,20 +37284,20 @@

    lun_read_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/lunread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.copy_state_maximum
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_CopyStateMax
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_read_partial_blocks

    -

    Percentage of reads whose size is not a multiple of WAFL block size

    +

    nfs_diag_storePool_DelegAlloc

    +

    Current number of delegation lock objects allocated.

    @@ -33328,20 +37310,20 @@

    lun_read_partial_blocks

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/lunread_partial_blocks
    Unit: percent
    Type: percent
    Base: read_ops
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.delegation_allocated
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunread_partial_blocks
    Unit: percent
    Type: percent
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_DelegAlloc
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_remote_bytes

    -

    I/O to or from a LUN which is not owned by the storage system handling the I/O.

    +

    nfs_diag_storePool_DelegMax

    +

    Maximum number of delegation lock objects.

    @@ -33354,20 +37336,20 @@

    lun_remote_bytesUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.delegation_maximum
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunremote_bytes
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_DelegMax
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_remote_ops

    -

    Number of operations received by a storage system that does not own the LUN targeted by the operations.

    +

    nfs_diag_storePool_DelegStateAlloc

    +

    Current number of delegation state objects allocated.

    @@ -33380,20 +37362,20 @@

    lun_remote_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.delegation_state_allocated
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunremote_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_DelegStateAlloc
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_size

    -

    The total provisioned size of the LUN. The LUN size can be increased but not be made smaller using the REST interface.
    The maximum and minimum sizes listed here are the absolute maximum and absolute minimum sizes in bytes. The actual minimum and maximum sizes vary depending on the ONTAP version, ONTAP platform, and the available space in the containing volume and aggregate.
    For more information, see Size properties in the docs section of the ONTAP REST API documentation.

    +

    nfs_diag_storePool_DelegStateMax

    +

    Maximum number of delegation state objects.

    @@ -33406,20 +37388,20 @@

    lun_sizeUnit: none
    Type: raw
    Base: +

    - - - + + +
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIlun-get-iterlun-info.sizeconf/zapi/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_DelegStateMax
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_size_used

    -

    The amount of space consumed by the main data stream of the LUN.
    This value is the total space consumed in the volume by the LUN, including filesystem overhead, but excluding prefix and suffix streams. Due to internal filesystem overhead and the many ways SAN filesystems and applications utilize blocks within a LUN, this value does not necessarily reflect actual consumption/availability from the perspective of the filesystem or application. Without specific knowledge of how the LUN blocks are utilized outside of ONTAP, this property should not be used as an indicator for an out-of-space condition.
    For more information, see Size properties in the docs section of the ONTAP REST API documentation.

    +

    nfs_diag_storePool_LayoutAlloc

    +

    Current number of layout objects allocated.

    @@ -33432,20 +37414,20 @@

    lun_size_used

    - - - + + + - - - + + +
    RESTapi/storage/lunsspace.usedconf/rest/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.layout_allocated
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIlun-get-iterlun-info.size-usedconf/zapi/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_LayoutAlloc
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_size_used_percent

    -

    This metric represents the percentage of a LUN that is currently being used.

    +

    nfs_diag_storePool_LayoutMax

    +

    Maximum number of layout objects.

    @@ -33458,20 +37440,20 @@

    lun_size_used_percentUnit: none
    Type: raw
    Base: +

    - - - + + +
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIlun-get-itersize_used, sizeconf/zapi/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_LayoutMax
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_unmap_reqs

    -

    Number of unmap command requests

    +

    nfs_diag_storePool_LayoutStateAlloc

    +

    Current number of layout state objects allocated.

    @@ -33484,20 +37466,20 @@

    lun_unmap_reqsUnit: none
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.layout_state_allocated
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lununmap_reqs
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_LayoutStateAlloc
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_write_align_histo

    -

    Histogram of WAFL write alignment (number of sectors off WAFL block start)

    +

    nfs_diag_storePool_LayoutStateMax

    +

    Maximum number of layout state objects.

    @@ -33510,20 +37492,20 @@

    lun_write_align_histoUnit: percent
    Type: percent
    Base: write_ops_sent -

    + + + - - - + + +
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.layout_state_maximum
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunwrite_align_histo
    Unit: percent
    Type: percent
    Base: write_ops_sent
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_LayoutStateMax
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_write_data

    -

    Write bytes

    +

    nfs_diag_storePool_LockStateAlloc

    +

    Current number of lock state objects allocated.

    @@ -33536,20 +37518,20 @@

    lun_write_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.lock_state_allocated
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_LockStateAlloc
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_write_ops

    -

    Number of write operations

    +

    nfs_diag_storePool_LockStateMax

    +

    Maximum number of lock state objects.

    @@ -33562,20 +37544,20 @@

    lun_write_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/lunwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.lock_state_maximum
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_LockStateMax
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_write_partial_blocks

    -

    Percentage of writes whose size is not a multiple of WAFL block size

    +

    nfs_diag_storePool_OpenAlloc

    +

    Current number of share objects allocated.

    @@ -33588,20 +37570,20 @@

    lun_write_partial_blocks

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/lunwrite_partial_blocks
    Unit: percent
    Type: percent
    Base: write_ops
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.open_allocated
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunwrite_partial_blocks
    Unit: percent
    Type: percent
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_OpenAlloc
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_writesame_reqs

    -

    Number of write same command requests

    +

    nfs_diag_storePool_OpenMax

    +

    Maximum number of share lock objects.

    @@ -33614,20 +37596,20 @@

    lun_writesame_reqsUnit: none
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.open_maximum
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunwritesame_reqs
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_OpenMax
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_writesame_unmap_reqs

    -

    Number of write same command requests with the unmap bit set

    +

    nfs_diag_storePool_OpenStateAlloc

    +

    Current number of open state objects allocated.

    @@ -33640,20 +37622,20 @@

    lun_writesame_unmap_reqs

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/lunwritesame_unmap_requests
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.openstate_allocated
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunwritesame_unmap_reqs
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_OpenStateAlloc
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    lun_xcopy_reqs

    -

    Total number of xcopy operations on the LUN

    +

    nfs_diag_storePool_OpenStateMax

    +

    Maximum number of open state objects.

    @@ -33666,20 +37648,20 @@

    lun_xcopy_reqsUnit: none
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/lun.yamlapi/cluster/counter/tables/nfs_v4_diagstorepool.openstate_maximum
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPIperf-object-get-instances lunxcopy_reqs
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/lun.yamlperf-object-get-instances nfsv4_diagstorePool_OpenStateMax
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    metadata_collector_api_time

    -

    amount of time to collect data from monitored cluster object

    +

    nfs_diag_storePool_OwnerAlloc

    +

    Current number of owner objects allocated.

    @@ -33692,20 +37674,20 @@

    metadata_collector_api_timeUnit: microseconds -

    + + + - - - + + +
    NAapi/cluster/counter/tables/nfs_v4_diagstorepool.owner_allocated
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPINAHarvest generated
    Unit: microseconds
    NAperf-object-get-instances nfsv4_diagstorePool_OwnerAlloc
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    metadata_collector_calc_time

    -

    amount of time it took to compute metrics between two successive polls, specifically using properties like raw, delta, rate, average, and percent. This metric is available for ZapiPerf/RestPerf collectors.

    +

    nfs_diag_storePool_OwnerMax

    +

    Maximum number of owner objects.

    @@ -33718,20 +37700,20 @@

    metadata_collector_calc_timeUnit: microseconds -

    + + + - - - + + +
    NAapi/cluster/counter/tables/nfs_v4_diagstorepool.owner_maximum
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPINAHarvest generated
    Unit: microseconds
    NAperf-object-get-instances nfsv4_diagstorePool_OwnerMax
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml
    -

    metadata_collector_instances

    -

    number of objects collected from monitored cluster

    +

    nfs_diag_storePool_SessionAlloc

    +

    Current number of session objects allocated.

    @@ -33744,20 +37726,20 @@

    metadata_collector_instancesUnit: scalar -

    + + + - - - + + +
    NAapi/cluster/counter/tables/nfs_v4_diagstorepool.session_allocated
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/nfsv4_pool.yaml
    ZAPINAHarvest generated
    Unit: scalar
  ZAPI: perf-object-get-instances nfsv4_diag, counter storePool_SessionAlloc (Unit: none, Type: raw,no-zero-values), conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml

Removed in this hunk range (Harvest metadata rows; REST/ZAPI endpoint NA, metric "Harvest generated"):

- metadata_collector_metrics: number of counters collected from monitored cluster (Unit: scalar)
- metadata_collector_parse_time: amount of time to parse XML, JSON, etc. for cluster object (Unit: microseconds)
- metadata_collector_plugin_time: amount of time for all plugins to post-process metrics (Unit: microseconds)
- metadata_collector_poll_time: amount of time it took for the poll to finish (Unit: microseconds)
- metadata_collector_skips: number of metrics that were not calculated between two successive polls; available for the ZapiPerf/RestPerf collectors (Unit: scalar)
- metadata_collector_task_time: amount of time it took for each collector's subtasks to complete (Unit: microseconds)
- metadata_component_count: number of metrics collected for each object (Unit: scalar)
- metadata_component_status: status of the collector; 0 means running, 1 means standby, 2 means failed (Unit: enum)
- metadata_exporter_count: number of metrics and labels exported (Unit: scalar)

Added in this hunk range (REST endpoint api/cluster/counter/tables/nfs_v4_diag, template conf/restperf/9.12.0/nfsv4_pool.yaml, Unit: none, Type: raw; ZAPI object nfsv4_diag, template conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml, Unit: none, Type: raw,no-zero-values):

+ nfs_diag_storePool_SessionConnectionHolderAlloc: Current number of session connection holder objects allocated.
  REST counter storepool.session_connection_holder_allocated; ZAPI counter storePool_SessionConnectionHolderAlloc
+ nfs_diag_storePool_SessionConnectionHolderMax: Maximum number of session connection holder objects.
  REST counter storepool.session_connection_holder_maximum; ZAPI counter storePool_SessionConnectionHolderMax
+ nfs_diag_storePool_SessionHolderAlloc: Current number of session holder objects allocated.
  REST counter storepool.session_holder_allocated; ZAPI counter storePool_SessionHolderAlloc
+ nfs_diag_storePool_SessionHolderMax: Maximum number of session holder objects.
  REST counter storepool.session_holder_maximum; ZAPI counter storePool_SessionHolderMax
+ nfs_diag_storePool_SessionMax: Maximum number of session objects.
  REST counter storepool.session_maximum; ZAPI counter storePool_SessionMax
+ nfs_diag_storePool_StateRefHistoryAlloc: Current number of state reference callstack history objects allocated.
  REST counter storepool.state_reference_history_allocated; ZAPI counter storePool_StateRefHistoryAlloc
+ nfs_diag_storePool_StateRefHistoryMax: Maximum number of state reference callstack history objects.
  REST counter storepool.state_reference_history_maximum; ZAPI counter storePool_StateRefHistoryMax
+ nfs_diag_storePool_StringAlloc: Current number of string objects allocated.
  REST counter storepool.string_allocated; ZAPI counter storePool_StringAlloc
+ nfs_diag_storePool_StringMax: Maximum number of string objects.
  REST counter storepool.string_maximum; ZAPI counter storePool_StringMax
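Since each storePool counter above comes as an Alloc/Max pair, a natural use is to watch how close a pool is to exhaustion. Below is a minimal sketch of that calculation; the metric names match the rows above, but the sample values and the scraping side are hypothetical.

```python
# Hypothetical samples for one cluster, e.g. scraped from Harvest's exporter.
samples = {
    "nfs_diag_storePool_SessionHolderAlloc": 1200,
    "nfs_diag_storePool_SessionHolderMax": 4096,
}

def pool_used_percent(alloc: float, maximum: float) -> float:
    """Percentage of a storePool that is currently allocated."""
    return 0.0 if maximum == 0 else 100.0 * alloc / maximum

print(pool_used_percent(samples["nfs_diag_storePool_SessionHolderAlloc"],
                        samples["nfs_diag_storePool_SessionHolderMax"]))
```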
Removed in this hunk range:

- metadata_exporter_time: amount of time it took to render, export, and serve exported data (Harvest generated; Unit: microseconds)
- metadata_target_goroutines: number of goroutines that exist within the poller (Harvest generated; Unit: scalar)
- metadata_target_status: status of the system being monitored; 0 means reachable, 1 means unreachable (Harvest generated; Unit: enum)
- metrocluster_check_aggr_status: Detail of the type of diagnostic operation run for the Aggregate with diagnostic operation result.

Added in this hunk range (REST endpoint api/cluster/counter/tables/nic_common, template conf/restperf/9.12.0/nic_common.yaml; ZAPI object nic_common, template conf/zapiperf/cdot/9.8.0/nic_common.yaml):

+ nic_link_up_to_down: Number of link state changes from UP to DOWN.
  REST counter link_up_to_down (Unit: none, Type: delta); ZAPI counter link_up_to_downs (Unit: none, Type: delta)
+ nic_rx_alignment_errors: Alignment errors detected on received packets
  REST counter receive_alignment_errors (Unit: none, Type: delta); ZAPI counter rx_alignment_errors (Unit: none, Type: delta)
+ nic_rx_bytes: Bytes received
  REST counter receive_bytes (Unit: b_per_sec, Type: rate); ZAPI counter rx_bytes (Unit: b_per_sec, Type: rate)
+ nic_rx_crc_errors: CRC errors detected on received packets
  REST: Unit: none, Type: delta; ZAPI counter rx_crc_errors (Unit: none, Type: delta)
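Counters marked Type: rate above (for example nic_rx_bytes) are published as a per-second value derived from two successive raw samples. A minimal sketch of that arithmetic, with made-up sample values, not Harvest's internal code:

```python
def per_second_rate(prev_value: float, cur_value: float, interval_seconds: float) -> float:
    """Per-second rate from two successive raw counter samples."""
    return (cur_value - prev_value) / interval_seconds

# Hypothetical raw byte counters taken 60 seconds apart.
print(per_second_rate(1_000_000_000, 1_012_000_000, 60.0))  # -> 200000.0 bytes/s
```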
Removed in this hunk range (REST endpoint NA, metric "Harvest generated", template conf/rest/9.12.0/metrocluster_check.yaml):

- metrocluster_check_cluster_status: Detail of the type of diagnostic operation run for the Cluster with diagnostic operation result.
- metrocluster_check_node_status: Detail of the type of diagnostic operation run for the Node with diagnostic operation result.
- metrocluster_check_volume_status: Detail of the type of diagnostic operation run for the Volume with diagnostic operation result.

Added in this hunk range (REST endpoint api/cluster/counter/tables/nic_common, template conf/restperf/9.12.0/nic_common.yaml; ZAPI object nic_common, template conf/zapiperf/cdot/9.8.0/nic_common.yaml):

+ nic_rx_errors: Error received
  REST counter receive_errors (Unit: b_per_sec, Type: rate); ZAPI counter rx_errors (Unit: b_per_sec, Type: rate)
+ nic_rx_length_errors: Length errors detected on received packets
  REST: Unit: none, Type: delta; ZAPI counter rx_length_errors (Unit: none, Type: delta)
+ nic_rx_total_errors: Total errors received
  REST counter receive_total_errors (Unit: none, Type: delta); ZAPI counter rx_total_errors (Unit: none, Type: delta)
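The error counters above are Type: delta, meaning the exported value is the change between two successive polls rather than a rate. A small sketch of that bookkeeping with hypothetical samples (again, not Harvest's internal code):

```python
def delta(prev_value: int, cur_value: int) -> int:
    """Change in a raw counter between two successive polls."""
    return cur_value - prev_value

# Hypothetical raw samples of a receive-error counter, one poll apart.
prev, cur = 1520, 1524
if delta(prev, cur) > 0:
    print(f"{delta(prev, cur)} new receive errors since the last poll")
```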
Removed in this hunk range (namespace counters; REST template conf/restperf/9.12.0/namespace.yaml; ZAPI object namespace, template conf/zapiperf/cdot/9.10.1/namespace.yaml unless noted):

- namespace_avg_other_latency: Average other ops latency in microseconds for all operations on the Namespace (Unit: microsec, Type: average, Base: other_ops)
- namespace_avg_read_latency: Average read latency in microseconds for all operations on the Namespace (Unit: microsec, Type: average, Base: read_ops)
- namespace_avg_write_latency: Average write latency in microseconds for all operations on the Namespace (Unit: microsec, Type: average, Base: write_ops)
- namespace_block_size: The size of blocks in the namespace in bytes. Valid in POST when creating an NVMe namespace that is not a clone of another; disallowed in POST when creating a namespace clone. (ZAPI: nvme-namespace-get-iter, nvme-namespace-info.block-size, conf/zapi/cdot/9.8.0/namespace.yaml)

Added in this hunk range (REST endpoint api/cluster/counter/tables/nic_common, template conf/restperf/9.12.0/nic_common.yaml; ZAPI object nic_common, template conf/zapiperf/cdot/9.8.0/nic_common.yaml):

+ nic_tx_bytes: Bytes sent
  REST counter transmit_bytes (Unit: b_per_sec, Type: rate); ZAPI counter tx_bytes (Unit: b_per_sec, Type: rate)
+ nic_tx_errors: Error sent
  REST counter transmit_errors (Unit: b_per_sec, Type: rate); ZAPI counter tx_errors (Unit: b_per_sec, Type: rate)
+ nic_tx_hw_errors: Transmit errors reported by hardware
  REST counter transmit_hw_errors (Unit: none, Type: delta); ZAPI counter tx_hw_errors (Unit: none, Type: delta)
+ nic_tx_total_errors: Total errors sent
  REST: Unit: none, Type: delta; ZAPI counter tx_total_errors (Unit: none, Type: delta)
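Latency counters such as the removed namespace_avg_read_latency rows are Type: average with a Base counter (read_ops): the exported value is the change in the latency accumulator divided by the change in its base between two polls. A minimal sketch with hypothetical samples:

```python
def average_over_interval(prev_sum: float, cur_sum: float,
                          prev_base: float, cur_base: float) -> float:
    """Average-type counter: delta(latency accumulator) / delta(base ops); 0 when idle."""
    ops = cur_base - prev_base
    return 0.0 if ops == 0 else (cur_sum - prev_sum) / ops

# Hypothetical microsecond accumulator and read_ops base, one poll apart.
print(average_over_interval(9_000_000, 9_450_000, 50_000, 51_000))  # -> 450.0 microsec
```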
Removed in this hunk range (namespace counters; REST template conf/restperf/9.12.0/namespace.yaml; ZAPI object namespace, template conf/zapiperf/cdot/9.10.1/namespace.yaml):

- namespace_other_ops: Number of other operations (Unit: per_sec, Type: rate)
- namespace_read_data: Read bytes (Unit: b_per_sec, Type: rate)
- namespace_read_ops: Number of read operations (Unit: per_sec, Type: rate)
- namespace_remote_bytes: Remote read bytes (REST: api/cluster/counter/tables/namespace, counter remote.read_data, Unit: b_per_sec, Type: rate; ZAPI counter remote_bytes)

Added in this hunk range:

+ node_avg_processor_busy: Average processor utilization across active processors in the system
  REST: api/cluster/counter/tables/system:node, counter average_processor_busy_percent (Unit: percent, Type: percent, Base: cpu_elapsed_time), conf/restperf/9.12.0/system_node.yaml; ZAPI: system:node, counter avg_processor_busy (Unit: percent, Type: percent, Base: cpu_elapsed_time), conf/zapiperf/cdot/9.8.0/system_node.yaml
+ node_cifs_connections: Number of connections
  REST: api/cluster/counter/tables/svm_cifs:node, counter connections (Unit: none, Type: raw), conf/restperf/9.12.0/cifs_node.yaml; ZAPI: cifs:node, counter connections (Unit: none, Type: raw), conf/zapiperf/cdot/9.8.0/cifs_node.yaml
+ node_cifs_established_sessions: Number of established SMB and SMB2 sessions
  REST: svm_cifs:node, counter established_sessions (Unit: none, Type: raw), conf/restperf/9.12.0/cifs_node.yaml; ZAPI: cifs:node, counter established_sessions (Unit: none, Type: raw), conf/zapiperf/cdot/9.8.0/cifs_node.yaml
+ node_cifs_latency: Average latency for CIFS operations
  REST: svm_cifs:node, counter latency (Unit: microsec, Type: average, Base: latency_base), conf/restperf/9.12.0/cifs_node.yaml; ZAPI: cifs:node, counter cifs_latency (Unit: microsec, Type: average, Base: cifs_latency_base), conf/zapiperf/cdot/9.8.0/cifs_node.yaml
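node_avg_processor_busy above is Type: percent with Base: cpu_elapsed_time, so the published number is the busy time accrued in the poll interval divided by the elapsed time over the same interval, times 100. A sketch with hypothetical deltas:

```python
def percent_of_elapsed(busy_delta: float, elapsed_delta: float) -> float:
    """Percent-type counter: busy delta relative to the cpu_elapsed_time delta."""
    return 0.0 if elapsed_delta == 0 else 100.0 * busy_delta / elapsed_delta

# Hypothetical deltas between two polls, both in the same time unit.
print(percent_of_elapsed(42_000_000, 60_000_000))  # -> 70.0 percent
```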
Removed in this hunk range (namespace counters and capacity fields):

- namespace_remote_ops: Number of remote read operations (REST template conf/restperf/9.12.0/namespace.yaml; ZAPI counter remote_ops, conf/zapiperf/cdot/9.10.1/namespace.yaml)
- namespace_size: The total provisioned size of the NVMe namespace. Valid in POST and PATCH. The NVMe namespace size can be increased but not be made smaller using the REST interface. The maximum and minimum sizes listed here are the absolute maximum and absolute minimum sizes in bytes; the maximum size varies with large NVMe namespace support in ONTAP. If large namespaces are supported, the maximum size is 128 TB (140737488355328 bytes); if not, the maximum size is just under 16 TB (17557557870592 bytes). The minimum size supported is always 4096 bytes. For more information, see Size properties in the docs section of the ONTAP REST API documentation. (ZAPI: nvme-namespace-get-iter, nvme-namespace-info.size, conf/zapi/cdot/9.8.0/namespace.yaml)
- namespace_size_available: This metric represents the amount of available space in a namespace. (REST: api/storage/namespaces, fields size, size_used, conf/rest/9.12.0/namespace.yaml; ZAPI: nvme-namespace-get-iter, size, size_used, conf/zapi/cdot/9.8.0/namespace.yaml)
- namespace_size_available_percent: This metric represents the percentage of available space in a namespace. (REST: api/storage/namespaces, fields size_available, size; ZAPI: nvme-namespace-get-iter, size_available, size)

Added in this hunk range:

+ node_cifs_op_count: Array of select CIFS operation counts
  REST: api/cluster/counter/tables/svm_cifs:node, counter op_count (Unit: none, Type: rate), conf/restperf/9.12.0/cifs_node.yaml; ZAPI: cifs:node, counter cifs_op_count (Unit: none, Type: rate), conf/zapiperf/cdot/9.8.0/cifs_node.yaml
+ node_cifs_open_files: Number of open files over SMB and SMB2
  REST: Unit: none, Type: raw, conf/restperf/9.12.0/cifs_node.yaml; ZAPI: cifs:node, counter open_files (Unit: none, Type: raw), conf/zapiperf/cdot/9.8.0/cifs_node.yaml
+ node_cifs_ops: Number of CIFS operations per second
  REST: api/cluster/counter/tables/system:node, counter cifs_ops (Unit: per_sec, Type: rate), conf/restperf/9.12.0/system_node.yaml; ZAPI: system:node, counter cifs_ops (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/system_node.yaml
+ node_cifs_read_latency: Average latency for CIFS read operations
  REST: svm_cifs:node, counter average_read_latency (Unit: microsec, Type: average, Base: total_read_ops), conf/restperf/9.12.0/cifs_node.yaml; ZAPI: cifs:node, counter cifs_read_latency (Unit: microsec, Type: average, Base: cifs_read_ops), conf/zapiperf/cdot/9.8.0/cifs_node.yaml
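The removed namespace_size_available rows above document that the value is derived from the size and size_used fields of api/storage/namespaces, and the percent variant from size_available and size. A minimal sketch of that derivation with hypothetical numbers:

```python
def size_available(size: int, size_used: int) -> int:
    """Free bytes in a namespace, derived as documented: size - size_used."""
    return size - size_used

def size_available_percent(available: int, size: int) -> float:
    """Free space as a percentage of the provisioned size."""
    return 0.0 if size == 0 else 100.0 * available / size

size, used = 17_179_869_184, 6_442_450_944   # hypothetical 16 GiB namespace, 6 GiB used
avail = size_available(size, used)
print(avail, round(size_available_percent(avail, size), 1))  # -> 10737418240 62.5
```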
Removed in this hunk range:

- namespace_size_used: The amount of space consumed by the main data stream of the NVMe namespace. This value is the total space consumed in the volume by the NVMe namespace, including filesystem overhead, but excluding prefix and suffix streams. Due to internal filesystem overhead and the many ways NVMe filesystems and applications utilize blocks within a namespace, this value does not necessarily reflect actual consumption/availability from the perspective of the filesystem or application. Without specific knowledge of how the namespace blocks are utilized outside of ONTAP, this property should not be used as an indicator for an out-of-space condition. For more information, see Size properties in the docs section of the ONTAP REST API documentation. (ZAPI: nvme-namespace-get-iter, nvme-namespace-info.size-used, conf/zapi/cdot/9.8.0/namespace.yaml)
- namespace_write_data: Write bytes (Unit: b_per_sec, Type: rate; ZAPI counter write_data, conf/zapiperf/cdot/9.10.1/namespace.yaml)
- namespace_write_ops: Number of write operations (Unit: per_sec, Type: rate; ZAPI counter write_ops)
- ndmp_session_data_bytes_processed: Indicates the NDMP data bytes processed. (REST: api/protocols/ndmp/sessions, field data.bytes_processed, conf/rest/9.7.0/ndmp_session.yaml)

Added in this hunk range (CIFS node counters; REST template conf/restperf/9.12.0/cifs_node.yaml; ZAPI object cifs:node, template conf/zapiperf/cdot/9.8.0/cifs_node.yaml):

+ node_cifs_read_ops: Total number of CIFS read operations
  REST: Unit: per_sec, Type: rate; ZAPI counter cifs_read_ops (Unit: per_sec, Type: rate)
+ node_cifs_total_ops: Total number of CIFS operations
  REST: api/cluster/counter/tables/svm_cifs:node, counter total_ops (Unit: per_sec, Type: rate); ZAPI counter cifs_ops (Unit: per_sec, Type: rate)
+ node_cifs_write_latency: Average latency for CIFS write operations
  REST: svm_cifs:node, counter average_write_latency (Unit: microsec, Type: average, Base: total_write_ops); ZAPI counter cifs_write_latency (Unit: microsec, Type: average, Base: cifs_write_ops)
+ node_cifs_write_ops: Total number of CIFS write operations
  REST: svm_cifs:node, counter total_write_ops (Unit: per_sec, Type: rate); ZAPI counter cifs_write_ops (Unit: per_sec, Type: rate)
Removed in this hunk range:

- ndmp_session_mover_bytes_moved: Indicates the NDMP mover bytes moved.
- net_port_mtu: Maximum transmission unit, largest packet size on this network (REST: api/network/ethernet/ports, field mtu, conf/rest/9.12.0/netport.yaml; ZAPI: net-port-get-iter, net-port-info.mtu, conf/zapi/cdot/9.8.0/netport.yaml)
- netstat_bytes_recvd: Number of bytes received by a TCP connection (ZAPI: netstat, counter bytes_recvd, Unit: none, Type: raw, conf/zapiperf/cdot/9.8.0/netstat.yaml)
- netstat_bytes_sent: Number of bytes sent by a TCP connection (ZAPI: netstat, counter bytes_sent, Unit: none, Type: raw)

Added in this hunk range (system:node counters; REST template conf/restperf/9.12.0/system_node.yaml; ZAPI object system:node, template conf/zapiperf/cdot/9.8.0/system_node.yaml, unless noted):

+ node_cpu_busy: System CPU resource utilization. Returns a computed percentage for the default CPU field. Basically computes a 'cpu usage summary' value which indicates how 'busy' the system is based upon the most heavily utilized domain. The idea is to determine the amount of available CPU until we're limited by either a domain maxing out OR we exhaust all available idle CPU cycles, whichever occurs first.
  REST: Unit: percent, Type: percent, Base: cpu_elapsed_time; ZAPI counter cpu_busy (Unit: percent, Type: percent, Base: cpu_elapsed_time)
+ node_cpu_busytime: The time (in hundredths of a second) that the CPU has been doing useful work since the last boot
  REST: api/private/cli/node, field cpu_busy_time, conf/rest/9.12.0/node.yaml; ZAPI: system-node-get-iter, node-details-info.cpu-busytime, conf/zapi/cdot/9.8.0/node.yaml
+ node_cpu_domain_busy: Array of processor time in percentage spent in various domains
  REST: Unit: percent, Type: percent, Base: cpu_elapsed_time; ZAPI counter domain_busy (Unit: percent, Type: percent, Base: cpu_elapsed_time)
+ node_cpu_elapsed_time: Elapsed time since boot
  REST: Unit: microsec, Type: delta; ZAPI counter cpu_elapsed_time (Unit: none, Type: delta,no-display)
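node_cpu_busy's description says the 'cpu usage summary' is driven by the most heavily utilized domain. The sketch below only illustrates that idea on hypothetical per-domain busy percentages (for example, derived from node_cpu_domain_busy); it is not ONTAP's actual algorithm.

```python
# Hypothetical per-domain busy percentages over one poll interval.
domain_busy_percent = {
    "kahuna": 35.0,
    "network": 62.0,
    "storage": 48.0,
    "raid": 20.0,
}

# The summary tracks whichever domain is closest to maxing out.
busiest_domain, summary = max(domain_busy_percent.items(), key=lambda kv: kv[1])
print(f"cpu summary ~ {summary}% (limited by {busiest_domain})")
```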
Removed in this hunk range (netstat counters: ZAPI object netstat, Unit: none, Type: raw, template conf/zapiperf/cdot/9.8.0/netstat.yaml):

- netstat_cong_win: Congestion window of a TCP connection (ZAPI counter cong_win)
- netstat_cong_win_th: Congestion window threshold of a TCP connection (ZAPI counter cong_win_th)
- netstat_ooorcv_pkts: Number of out-of-order packets received by this TCP connection (ZAPI counter ooorcv_pkts)
- netstat_recv_window: Receive window size of a TCP connection (ZAPI counter recv_window)
- netstat_rexmit_pkts: Number of packets retransmitted by this TCP connection (ZAPI counter rexmit_pkts)
- netstat_send_window: Send window size of a TCP connection (ZAPI counter send_window)
- nfs_clients_idle_duration: Specifies an ISO-8601 format of date and time to retrieve the idle time duration in hours, minutes, and seconds format.

Added in this hunk range (disk counters use disk:constituent with templates conf/restperf/9.12.0/disk.yaml and conf/zapiperf/cdot/9.8.0/disk.yaml; system:node counters use conf/restperf/9.12.0/system_node.yaml and conf/zapiperf/cdot/9.8.0/system_node.yaml):

+ node_disk_busy: The utilization percent of the disk. node_disk_busy is disk_busy aggregated by node.
  REST: Unit: percent, Type: percent, Base: base_for_disk_busy; ZAPI: disk:constituent, counter disk_busy (Unit: percent, Type: percent, Base: base_for_disk_busy)
+ node_disk_capacity: Disk capacity in MB. node_disk_capacity is disk_capacity aggregated by node.
  REST: Unit: mb, Type: raw; ZAPI: disk:constituent, counter disk_capacity (Unit: mb, Type: raw)
+ node_disk_cp_read_chain: Average number of blocks transferred in each consistency point read operation during a CP. node_disk_cp_read_chain is disk_cp_read_chain aggregated by node.
  REST: Unit: none, Type: average, Base: cp_read_count; ZAPI: disk:constituent, counter cp_read_chain (Unit: none, Type: average, Base: cp_reads)
+ node_disk_cp_read_latency: Average latency per block in microseconds for consistency point read operations. node_disk_cp_read_latency is disk_cp_read_latency aggregated by node.
  REST: Unit: microsec, Type: average, Base: cp_read_blocks; ZAPI: disk:constituent, counter cp_read_latency (Unit: microsec, Type: average, Base: cp_read_blocks)
+ node_disk_cp_reads: Number of disk read operations initiated each second for consistency point processing. node_disk_cp_reads is disk_cp_reads aggregated by node.
  REST: Unit: per_sec, Type: rate; ZAPI: disk:constituent, counter cp_reads (Unit: per_sec, Type: rate)
+ node_disk_data_read: Number of disk kilobytes (KB) read per second
  REST: Unit: kb_per_sec, Type: rate; ZAPI: system:node, counter disk_data_read (Unit: kb_per_sec, Type: rate)
+ node_disk_data_written: Number of disk kilobytes (KB) written per second
  REST: Unit: kb_per_sec, Type: rate; ZAPI: system:node, counter disk_data_written (Unit: kb_per_sec, Type: rate)
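Several of the entries above are described as "disk_X aggregated by node", i.e. per-disk series rolled up under the node label. The sketch below illustrates that roll-up over hypothetical per-disk samples using a plain sum, which fits rate/throughput style counters; average-type counters would need a weighted combination instead.

```python
from collections import defaultdict

# Hypothetical per-disk samples of a rate counter, each carrying its node label.
disk_cp_reads = [
    {"node": "node-01", "disk": "1.0.1", "value": 1500.0},
    {"node": "node-01", "disk": "1.0.2", "value": 900.0},
    {"node": "node-02", "disk": "1.0.3", "value": 2100.0},
]

per_node = defaultdict(float)
for sample in disk_cp_reads:
    per_node[sample["node"]] += sample["value"]   # sum per node label

print(dict(per_node))  # {'node-01': 2400.0, 'node-02': 2100.0}
```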
Removed in this hunk range (NFSv4 diag storePool counters: REST endpoint api/cluster/counter/tables/nfs_v4_diag, template conf/restperf/9.12.0/nfsv4_pool.yaml, Unit: none, Type: raw; ZAPI object nfsv4_diag, template conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml, Unit: none, Type: raw,no-zero-values):

- nfs_diag_storePool_ByteLockAlloc: Current number of byte range lock objects allocated. (REST counter storepool.byte_lock_allocated; ZAPI counter storePool_ByteLockAlloc)
- nfs_diag_storePool_ByteLockMax: Maximum number of byte range lock objects. (ZAPI counter storePool_ByteLockMax)
- nfs_diag_storePool_ClientAlloc: Current number of client objects allocated. (ZAPI counter storePool_ClientAlloc)
- nfs_diag_storePool_ClientMax: Maximum number of client objects. (ZAPI counter storePool_ClientMax)
- nfs_diag_storePool_ConnectionParentSessionReferenceAlloc: Current number of connection parent session reference objects allocated. (REST counter storepool.connection_parent_session_reference_allocated; ZAPI counter storePool_ConnectionParentSessionReferenceAlloc)
- nfs_diag_storePool_ConnectionParentSessionReferenceMax: Maximum number of connection parent session reference objects. (REST counter storepool.connection_parent_session_reference_maximum; ZAPI counter storePool_ConnectionParentSessionReferenceMax)
- nfs_diag_storePool_CopyStateAlloc: Current number of copy state objects allocated. (REST counter storepool.copy_state_allocated; ZAPI counter storePool_CopyStateAlloc)
- nfs_diag_storePool_CopyStateMax: Maximum number of copy state objects. (REST counter storepool.copy_state_maximum; ZAPI counter storePool_CopyStateMax)

Added in this hunk range (REST endpoint api/cluster/counter/tables/disk:constituent, template conf/restperf/9.12.0/disk.yaml; ZAPI object disk:constituent, template conf/zapiperf/cdot/9.8.0/disk.yaml):

+ node_disk_io_pending: Average number of I/Os issued to the disk for which we have not yet received the response. node_disk_io_pending is disk_io_pending aggregated by node.
  REST/ZAPI counter io_pending (Unit: none, Type: average, Base: base_for_disk_busy)
+ node_disk_io_queued: Number of I/Os queued to the disk but not yet issued. node_disk_io_queued is disk_io_queued aggregated by node.
  REST/ZAPI counter io_queued (Unit: none, Type: average, Base: base_for_disk_busy)
+ node_disk_max_busy: The utilization percent of the disk. node_disk_max_busy is the maximum of disk_busy for label node.
  REST counter disk_busy_percent (Unit: percent, Type: percent, Base: base_for_disk_busy); ZAPI counter disk_busy (Unit: percent, Type: percent, Base: base_for_disk_busy)
+ node_disk_max_capacity: Disk capacity in MB. node_disk_max_capacity is the maximum of disk_capacity for label node.
  REST counter capacity (Unit: mb, Type: raw); ZAPI counter disk_capacity (Unit: mb, Type: raw)
+ node_disk_max_cp_read_chain: Average number of blocks transferred in each consistency point read operation during a CP. node_disk_max_cp_read_chain is the maximum of disk_cp_read_chain for label node.
  REST counter cp_read_chain (Unit: none, Type: average, Base: cp_read_count); ZAPI counter cp_read_chain (Unit: none, Type: average, Base: cp_reads)
+ node_disk_max_cp_read_latency: Average latency per block in microseconds for consistency point read operations. node_disk_max_cp_read_latency is the maximum of disk_cp_read_latency for label node.
  REST/ZAPI counter cp_read_latency (Unit: microsec, Type: average, Base: cp_read_blocks)
+ node_disk_max_cp_reads: Number of disk read operations initiated each second for consistency point processing. node_disk_max_cp_reads is the maximum of disk_cp_reads for label node.
  REST counter cp_read_count (Unit: per_sec, Type: rate); ZAPI counter cp_reads (Unit: per_sec, Type: rate)
+ node_disk_max_io_pending: Average number of I/Os issued to the disk for which we have not yet received the response. node_disk_max_io_pending is the maximum of disk_io_pending for label node.
  REST/ZAPI counter io_pending (Unit: none, Type: average, Base: base_for_disk_busy)
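The node_disk_max_* family above is documented as "the maximum of disk_X for label node". A sketch of that reduction over hypothetical per-disk disk_busy samples:

```python
from collections import defaultdict

# Hypothetical per-disk disk_busy percentages with their node labels.
disk_busy = [
    ("node-01", "1.0.1", 34.0),
    ("node-01", "1.0.2", 71.5),
    ("node-02", "1.0.3", 12.0),
]

node_disk_max_busy = defaultdict(float)
for node, _disk, value in disk_busy:
    node_disk_max_busy[node] = max(node_disk_max_busy[node], value)  # keep the busiest disk per node

print(dict(node_disk_max_busy))  # {'node-01': 71.5, 'node-02': 12.0}
```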
Removed in this hunk range (same NFSv4 diag storePool conventions as above):

- nfs_diag_storePool_DelegAlloc: Current number of delegation lock objects allocated. (ZAPI counter storePool_DelegAlloc)
- nfs_diag_storePool_DelegMax: Maximum number of delegation lock objects. (ZAPI counter storePool_DelegMax)
- nfs_diag_storePool_DelegStateAlloc: Current number of delegation state objects allocated. (REST counter storepool.delegation_state_allocated; ZAPI counter storePool_DelegStateAlloc)
- nfs_diag_storePool_DelegStateMax: Maximum number of delegation state objects. (REST counter storepool.delegation_state_maximum; ZAPI counter storePool_DelegStateMax)
- nfs_diag_storePool_LayoutAlloc: Current number of layout objects allocated. (ZAPI counter storePool_LayoutAlloc)
- nfs_diag_storePool_LayoutMax: Maximum number of layout objects. (ZAPI counter storePool_LayoutMax)
- nfs_diag_storePool_LayoutStateAlloc: Current number of layout state objects allocated. (REST counter storepool.layout_state_allocated; ZAPI counter storePool_LayoutStateAlloc)
- nfs_diag_storePool_LayoutStateMax: Maximum number of layout state objects. (REST counter storepool.layout_state_maximum; ZAPI counter storePool_LayoutStateMax)

Added in this hunk range (disk:constituent; REST template conf/restperf/9.12.0/disk.yaml; ZAPI template conf/zapiperf/cdot/9.8.0/disk.yaml):

+ node_disk_max_io_queued: Number of I/Os queued to the disk but not yet issued. node_disk_max_io_queued is the maximum of disk_io_queued for label node.
  REST/ZAPI counter io_queued (Unit: none, Type: average, Base: base_for_disk_busy)
+ node_disk_max_total_data: Total throughput for user operations per second. node_disk_max_total_data is the maximum of disk_total_data for label node.
  REST/ZAPI counter total_data (Unit: b_per_sec, Type: rate)
+ node_disk_max_total_transfers: Total number of disk operations involving data transfer initiated per second. node_disk_max_total_transfers is the maximum of disk_total_transfers for label node.
  REST counter total_transfer_count (Unit: per_sec, Type: rate); ZAPI counter total_transfers (Unit: per_sec, Type: rate)
+ node_disk_max_user_read_blocks: Number of blocks transferred for user read operations per second. node_disk_max_user_read_blocks is the maximum of disk_user_read_blocks for label node.
  REST counter user_read_block_count (Unit: per_sec, Type: rate); ZAPI counter user_read_blocks (Unit: per_sec, Type: rate)
+ node_disk_max_user_read_chain: Average number of blocks transferred in each user read operation. node_disk_max_user_read_chain is the maximum of disk_user_read_chain for label node.
  REST counter user_read_chain (Unit: none, Type: average, Base: user_read_count); ZAPI counter user_read_chain (Unit: none, Type: average, Base: user_reads)
+ node_disk_max_user_read_latency: Average latency per block in microseconds for user read operations. node_disk_max_user_read_latency is the maximum of disk_user_read_latency for label node.
  REST counter user_read_latency (Unit: microsec, Type: average, Base: user_read_block_count); ZAPI counter user_read_latency (Unit: microsec, Type: average, Base: user_read_blocks)
+ node_disk_max_user_reads: Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. node_disk_max_user_reads is the maximum of disk_user_reads for label node.
  REST counter user_read_count (Unit: per_sec, Type: rate); ZAPI counter user_reads (Unit: per_sec, Type: rate)
+ node_disk_max_user_write_blocks: Number of blocks transferred for user write operations per second. node_disk_max_user_write_blocks is the maximum of disk_user_write_blocks for label node.
  REST counter user_write_block_count (Unit: per_sec, Type: rate); ZAPI counter user_write_blocks (Unit: per_sec, Type: rate)
Removed in this hunk range (same NFSv4 diag storePool conventions as above):

- nfs_diag_storePool_LockStateAlloc: Current number of lock state objects allocated. (REST counter storepool.lock_state_allocated; ZAPI counter storePool_LockStateAlloc)
- nfs_diag_storePool_LockStateMax: Maximum number of lock state objects. (REST counter storepool.lock_state_maximum; ZAPI counter storePool_LockStateMax)
- nfs_diag_storePool_OpenAlloc: Current number of share objects allocated. (ZAPI counter storePool_OpenAlloc)
- nfs_diag_storePool_OpenMax: Maximum number of share lock objects. (ZAPI counter storePool_OpenMax)
- nfs_diag_storePool_OpenStateAlloc: Current number of open state objects allocated. (REST counter storepool.openstate_allocated; ZAPI counter storePool_OpenStateAlloc)
- nfs_diag_storePool_OpenStateMax: Maximum number of open state objects. (REST counter storepool.openstate_maximum; ZAPI counter storePool_OpenStateMax)
- nfs_diag_storePool_OwnerAlloc: Current number of owner objects allocated. (ZAPI counter storePool_OwnerAlloc)
- nfs_diag_storePool_OwnerMax: Maximum number of owner objects. (ZAPI counter storePool_OwnerMax)

Added in this hunk range (disk:constituent; REST template conf/restperf/9.12.0/disk.yaml; ZAPI template conf/zapiperf/cdot/9.8.0/disk.yaml):

+ node_disk_max_user_write_chain: Average number of blocks transferred in each user write operation. node_disk_max_user_write_chain is the maximum of disk_user_write_chain for label node.
  REST counter user_write_chain (Unit: none, Type: average, Base: user_write_count); ZAPI counter user_write_chain (Unit: none, Type: average, Base: user_writes)
+ node_disk_max_user_write_latency: Average latency per block in microseconds for user write operations. node_disk_max_user_write_latency is the maximum of disk_user_write_latency for label node.
  REST counter user_write_latency (Unit: microsec, Type: average, Base: user_write_block_count); ZAPI counter user_write_latency (Unit: microsec, Type: average, Base: user_write_blocks)
+ node_disk_max_user_writes: Number of disk write operations initiated each second for storing data or metadata associated with user requests. node_disk_max_user_writes is the maximum of disk_user_writes for label node.
  REST counter user_write_count (Unit: per_sec, Type: rate); ZAPI counter user_writes (Unit: per_sec, Type: rate)
+ node_disk_total_data: Total throughput for user operations per second. node_disk_total_data is disk_total_data aggregated by node.
  REST/ZAPI counter total_data (Unit: b_per_sec, Type: rate)
+ node_disk_total_transfers: Total number of disk operations involving data transfer initiated per second. node_disk_total_transfers is disk_total_transfers aggregated by node.
  REST counter total_transfer_count (Unit: per_sec, Type: rate); ZAPI counter total_transfers (Unit: per_sec, Type: rate)
+ node_disk_user_read_blocks: Number of blocks transferred for user read operations per second. node_disk_user_read_blocks is disk_user_read_blocks aggregated by node.
  REST counter user_read_block_count (Unit: per_sec, Type: rate); ZAPI counter user_read_blocks (Unit: per_sec, Type: rate)
+ node_disk_user_read_chain: Average number of blocks transferred in each user read operation. node_disk_user_read_chain is disk_user_read_chain aggregated by node.
  REST counter user_read_chain (Unit: none, Type: average, Base: user_read_count); ZAPI counter user_read_chain (Unit: none, Type: average, Base: user_reads)
+ node_disk_user_read_latency: Average latency per block in microseconds for user read operations. node_disk_user_read_latency is disk_user_read_latency aggregated by node.
  REST counter user_read_latency (Unit: microsec, Type: average, Base: user_read_block_count); ZAPI counter user_read_latency (Unit: microsec, Type: average, Base: user_read_blocks)
Removed in this hunk range (same NFSv4 diag storePool conventions as above):

- nfs_diag_storePool_SessionAlloc: Current number of session objects allocated. (REST counter storepool.session_allocated; ZAPI counter storePool_SessionAlloc)
- nfs_diag_storePool_SessionConnectionHolderAlloc: Current number of session connection holder objects allocated. (REST counter storepool.session_connection_holder_allocated; ZAPI counter storePool_SessionConnectionHolderAlloc)
- nfs_diag_storePool_SessionConnectionHolderMax: Maximum number of session connection holder objects. (REST counter storepool.session_connection_holder_maximum; ZAPI counter storePool_SessionConnectionHolderMax)
- nfs_diag_storePool_SessionHolderAlloc: Current number of session holder objects allocated. (REST counter storepool.session_holder_allocated; ZAPI counter storePool_SessionHolderAlloc)
- nfs_diag_storePool_SessionHolderMax: Maximum number of session holder objects. (REST counter storepool.session_holder_maximum; ZAPI counter storePool_SessionHolderMax)

Added in this hunk range (disk:constituent; REST template conf/restperf/9.12.0/disk.yaml; ZAPI template conf/zapiperf/cdot/9.8.0/disk.yaml):

+ node_disk_user_reads: Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. node_disk_user_reads is disk_user_reads aggregated by node.
  REST counter user_read_count (Unit: per_sec, Type: rate); ZAPI counter user_reads (Unit: per_sec, Type: rate)
+ node_disk_user_write_blocks: Number of blocks transferred for user write operations per second. node_disk_user_write_blocks is disk_user_write_blocks aggregated by node.
  REST counter user_write_block_count (Unit: per_sec, Type: rate); ZAPI counter user_write_blocks (Unit: per_sec, Type: rate)
+ node_disk_user_write_chain: Average number of blocks transferred in each user write operation. node_disk_user_write_chain is disk_user_write_chain aggregated by node.
  REST counter user_write_chain (Unit: none, Type: average, Base: user_write_count); ZAPI counter user_write_chain (Unit: none, Type: average, Base: user_writes)
+ node_disk_user_write_latency: Average latency per block in microseconds for user write operations. node_disk_user_write_latency is disk_user_write_latency aggregated by node.
  REST counter user_write_latency (Unit: microsec, Type: average, Base: user_write_block_count); ZAPI counter user_write_latency (Unit: microsec, Type: average, Base: user_write_blocks)
+ node_disk_user_writes: Number of disk write operations initiated each second for storing data or metadata associated with user requests. node_disk_user_writes is disk_user_writes aggregated by node.
  REST counter user_write_count (Unit: per_sec, Type: rate); ZAPI counter user_writes (Unit: per_sec, Type: rate)
Removed in this hunk range (same NFSv4 diag storePool conventions as above):

- nfs_diag_storePool_SessionMax: Maximum number of session objects. (ZAPI counter storePool_SessionMax)
- nfs_diag_storePool_StateRefHistoryAlloc: Current number of state reference callstack history objects allocated. (REST counter storepool.state_reference_history_allocated; ZAPI counter storePool_StateRefHistoryAlloc)
- nfs_diag_storePool_StateRefHistoryMax: Maximum number of state reference callstack history objects. (REST counter storepool.state_reference_history_maximum; ZAPI counter storePool_StateRefHistoryMax)

Added in this hunk range:

+ node_failed_fan: Specifies a count of the number of chassis fans that are not operating within the recommended RPM range.
  REST: api/cluster/nodes, field controller.failed_fan.count, conf/rest/9.12.0/node.yaml; ZAPI: system-node-get-iter, node-details-info.env-failed-fan-count, conf/zapi/cdot/9.8.0/node.yaml
+ node_failed_power: Number of failed power supply units.
  REST: api/cluster/nodes, field controller.failed_power_supply.count, conf/rest/9.12.0/node.yaml; ZAPI: system-node-get-iter, node-details-info.env-failed-power-supply-count, conf/zapi/cdot/9.8.0/node.yaml
+ node_fcp_data_recv: Number of FCP kilobytes (KB) received per second
  REST: api/cluster/counter/tables/system:node, counter fcp_data_received (Unit: kb_per_sec, Type: rate), conf/restperf/9.12.0/system_node.yaml; ZAPI: system:node, counter fcp_data_recv (Unit: kb_per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/system_node.yaml
    -

    nfs_diag_storePool_StringAlloc

    -

    Current number of string objects allocated.

    +

    node_fcp_data_sent

    +

    Number of FCP kilobytes (KB) sent per second

    @@ -35630,20 +39702,20 @@

    nfs_diag_storePool_StringAllocUnit: none
    Type: raw
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_pool.yamlapi/cluster/counter/tables/system:nodefcp_data_sent
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    ZAPIperf-object-get-instances nfsv4_diagstorePool_StringAlloc
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yamlperf-object-get-instances system:nodefcp_data_sent
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    -

    nfs_diag_storePool_StringMax

    -

    Maximum number of string objects.

    +

    node_fcp_ops

    +

    Number of FCP operations per second

    @@ -35656,20 +39728,20 @@

    nfs_diag_storePool_StringMaxUnit: none
    Type: raw
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_pool.yamlapi/cluster/counter/tables/system:nodefcp_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    ZAPIperf-object-get-instances nfsv4_diagstorePool_StringMax
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_pool.yamlperf-object-get-instances system:nodefcp_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    - -

Number of link state changes from UP to DOWN.

    +

    node_hdd_data_read

    +

Number of HDD kilobytes (KB) read per second

    @@ -35682,20 +39754,20 @@ + + + - - - + + +
    conf/restperf/9.12.0/nic_common.yamlapi/cluster/counter/tables/system:nodehdd_data_read
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    ZAPIperf-object-get-instances nic_commonlink_up_to_downs
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/nic_common.yamlperf-object-get-instances system:nodehdd_data_read
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    -

    nic_rx_alignment_errors

    -

    Alignment errors detected on received packets

    +

    node_hdd_data_written

    +

    Number of HDD kilobytes (KB) written per second

    @@ -35708,20 +39780,20 @@

    nic_rx_alignment_errors

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nic_commonreceive_alignment_errors
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/nic_common.yamlapi/cluster/counter/tables/system:nodehdd_data_written
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    ZAPIperf-object-get-instances nic_commonrx_alignment_errors
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/nic_common.yamlperf-object-get-instances system:nodehdd_data_written
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    -

    nic_rx_bytes

    -

    Bytes received

    +

    node_iscsi_ops

    +

    Number of iSCSI operations per second

    @@ -35734,20 +39806,20 @@

    nic_rx_bytes

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nic_commonreceive_bytes
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nic_common.yamlapi/cluster/counter/tables/system:nodeiscsi_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    ZAPIperf-object-get-instances nic_commonrx_bytes
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nic_common.yamlperf-object-get-instances system:nodeiscsi_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    -

    nic_rx_crc_errors

    -

    CRC errors detected on received packets

    +

    node_memory

    +

    Total memory in megabytes (MB)

    @@ -35760,20 +39832,20 @@

    nic_rx_crc_errorsUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nic_common.yamlapi/cluster/counter/tables/system:nodememory
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/system_node.yaml
    ZAPIperf-object-get-instances nic_commonrx_crc_errors
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/nic_common.yamlperf-object-get-instances system:nodememory
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    -

    nic_rx_errors

    -

Errors received

    +

    node_net_data_recv

    +

    Number of network kilobytes (KB) received per second

    @@ -35786,20 +39858,20 @@

    nic_rx_errors

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nic_commonreceive_errors
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nic_common.yamlapi/cluster/counter/tables/system:nodenetwork_data_received
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    ZAPIperf-object-get-instances nic_commonrx_errors
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nic_common.yamlperf-object-get-instances system:nodenet_data_recv
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    -

    nic_rx_length_errors

    -

    Length errors detected on received packets

    +

    node_net_data_sent

    +

    Number of network kilobytes (KB) sent per second

    @@ -35812,20 +39884,20 @@

    nic_rx_length_errorsUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nic_common.yamlapi/cluster/counter/tables/system:nodenetwork_data_sent
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    ZAPIperf-object-get-instances nic_commonrx_length_errors
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/nic_common.yamlperf-object-get-instances system:nodenet_data_sent
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    -

    nic_rx_total_errors

    -

    Total errors received

    +

    node_nfs_access_avg_latency

    +

    Average latency of Access procedure requests. The counter keeps track of the average response time of Access requests.
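
For REST, the per-node NFS counters live in per-protocol counter tables (svm_nfs_v3:node, svm_nfs_v41:node, and so on, as listed below). A hedged sketch of pulling the raw NFSv3 access counters straight from the counter-tables API; the query-parameter syntax can vary between ONTAP releases, so treat it as a starting point rather than a reference:

```python
import requests
from urllib.parse import quote

CLUSTER = "https://cluster.example.com"    # placeholder
AUTH = ("admin", "password")               # placeholder

table = quote("svm_nfs_v3:node", safe="")  # ':' must survive in the URL path
resp = requests.get(
    f"{CLUSTER}/api/cluster/counter/tables/{table}/rows",
    params={"fields": "counters"},         # return the counter list for each row
    auth=AUTH,
    verify=False,
)
resp.raise_for_status()

for row in resp.json().get("records", []):
    counters = {c["name"]: c.get("value") for c in row.get("counters", [])}
    # access.average_latency is average-type: divide its delta by the delta of access.total.
    print(row.get("id"), counters.get("access.average_latency"), counters.get("access.total"))
```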

    @@ -35838,20 +39910,56 @@

    nic_rx_total_errorsUnit: none
    Type: delta
    Base: -

    + + + + + + + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + + + + + + + +
    conf/restperf/9.12.0/nic_common.yamlapi/cluster/counter/tables/svm_nfs_v3:nodeaccess.average_latency
    Unit: microsec
    Type: average
    Base: access.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodeaccess.average_latency
    Unit: microsec
    Type: average
    Base: access.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeaccess.average_latency
    Unit: microsec
    Type: average
    Base: access.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeaccess.average_latency
    Unit: microsec
    Type: average
    Base: access.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nic_commonrx_total_errors
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/nic_common.yamlperf-object-get-instances nfsv3:nodeaccess_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: access_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeaccess_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: access_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeaccess_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: access_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeaccess_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: access_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    nic_tx_bytes

    -

    Bytes sent

    +

    node_nfs_access_total

    +

    Total number of Access procedure requests. It is the total number of access success and access error requests.

    @@ -35864,20 +39972,56 @@

    nic_tx_bytes

    - - - + + + + + + + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + + + + + + + +
    RESTapi/cluster/counter/tables/nic_commontransmit_bytes
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nic_common.yamlapi/cluster/counter/tables/svm_nfs_v3:nodeaccess.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodeaccess.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeaccess.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeaccess.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nic_commontx_bytes
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nic_common.yamlperf-object-get-instances nfsv3:nodeaccess_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeaccess_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeaccess_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeaccess_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    nic_tx_errors

    -

Errors sent

    +

    node_nfs_backchannel_ctl_avg_latency

    +

    Average latency of BACKCHANNEL_CTL operations.

    @@ -35890,20 +40034,32 @@

    nic_tx_errors

    - - - + + + + + + + + + - - - + + + + + + + + +
    RESTapi/cluster/counter/tables/nic_commontransmit_errors
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nic_common.yamlapi/cluster/counter/tables/svm_nfs_v41:nodebackchannel_ctl.average_latency
    Unit: microsec
    Type: average
    Base: backchannel_ctl.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodebackchannel_ctl.average_latency
    Unit: microsec
    Type: average
    Base: backchannel_ctl.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nic_commontx_errors
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nic_common.yamlperf-object-get-instances nfsv4_1:nodebackchannel_ctl_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: backchannel_ctl_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodebackchannel_ctl_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: backchannel_ctl_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    nic_tx_hw_errors

    -

    Transmit errors reported by hardware

    +

    node_nfs_backchannel_ctl_total

    +

    Total number of BACKCHANNEL_CTL operations.

    @@ -35916,20 +40072,32 @@

    nic_tx_hw_errorsUnit: none
    Type: delta
    Base: -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/nic_common.yamlapi/cluster/counter/tables/svm_nfs_v41:nodebackchannel_ctl.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodebackchannel_ctl.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nic_commontx_hw_errors
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/nic_common.yamlperf-object-get-instances nfsv4_1:nodebackchannel_ctl_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodebackchannel_ctl_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    nic_tx_total_errors

    -

    Total errors sent

    +

    node_nfs_bind_conn_to_session_avg_latency

    +

    Average latency of BIND_CONN_TO_SESSION operations.

    @@ -35942,20 +40110,32 @@

    nic_tx_total_errorsUnit: none
    Type: delta
    Base: -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/nic_common.yamlapi/cluster/counter/tables/svm_nfs_v41:nodebind_connections_to_session.average_latency
    Unit: microsec
    Type: average
    Base: bind_connections_to_session.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodebind_conn_to_session.average_latency
    Unit: microsec
    Type: average
    Base: bind_conn_to_session.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nic_commontx_total_errors
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/nic_common.yamlperf-object-get-instances nfsv4_1:nodebind_conn_to_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: bind_conn_to_session_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodebind_conn_to_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: bind_conn_to_session_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_avg_processor_busy

    -

    Average processor utilization across active processors in the system

    +

    node_nfs_bind_conn_to_session_total

    +

    Total number of BIND_CONN_TO_SESSION operations.

    @@ -35968,20 +40148,32 @@

    node_avg_processor_busy

    - - - + + + + + + + + + - - - + + + + + + + + +
    RESTapi/cluster/counter/tables/system:nodeaverage_processor_busy_percent
    Unit: percent
    Type: percent
    Base: cpu_elapsed_time
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodebind_connections_to_session.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodebind_conn_to_session.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances system:nodeavg_processor_busy
    Unit: percent
    Type: percent
    Base: cpu_elapsed_time
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nfsv4_1:nodebind_conn_to_session_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodebind_conn_to_session_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_cifs_connections

    -

    Number of connections

    +

    node_nfs_close_avg_latency

    +

    Average latency of CLOSE operations.

    @@ -35994,20 +40186,44 @@

    node_cifs_connectionsUnit: none
    Type: raw
    Base: -

    + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
    conf/restperf/9.12.0/cifs_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodeclose.average_latency
    Unit: microsec
    Type: average
    Base: close.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeclose.average_latency
    Unit: microsec
    Type: average
    Base: close.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeclose.average_latency
    Unit: microsec
    Type: average
    Base: close.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances cifs:nodeconnections
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_node.yamlperf-object-get-instances nfsv4_1:nodeclose_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: close_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeclose_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: close_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeclose_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: close_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_cifs_established_sessions

    -

    Number of established SMB and SMB2 sessions

    +

    node_nfs_close_total

    +

    Total number of CLOSE operations.

    @@ -36020,20 +40236,44 @@

    node_cifs_established_sessionsUnit: none
    Type: raw
    Base: -

    + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
    conf/restperf/9.12.0/cifs_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodeclose.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeclose.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeclose.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances cifs:nodeestablished_sessions
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_node.yamlperf-object-get-instances nfsv4_1:nodeclose_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeclose_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeclose_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_cifs_latency

    -

    Average latency for CIFS operations

    +

    node_nfs_commit_avg_latency

    +

    Average latency of Commit procedure requests. The counter keeps track of the average response time of Commit requests.

    @@ -36046,20 +40286,56 @@

    node_cifs_latencyUnit: microsec
    Type: average
    Base: latency_base -

    + + + + + + + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + + + + + + + +
    conf/restperf/9.12.0/cifs_node.yamlapi/cluster/counter/tables/svm_nfs_v3:nodecommit.average_latency
    Unit: microsec
    Type: average
    Base: commit.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodecommit.average_latency
    Unit: microsec
    Type: average
    Base: commit.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodecommit.average_latency
    Unit: microsec
    Type: average
    Base: commit.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodecommit.average_latency
    Unit: microsec
    Type: average
    Base: commit.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances cifs:nodecifs_latency
    Unit: microsec
    Type: average
    Base: cifs_latency_base
    conf/zapiperf/cdot/9.8.0/cifs_node.yamlperf-object-get-instances nfsv3:nodecommit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: commit_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodecommit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: commit_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodecommit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: commit_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodecommit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: commit_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_cifs_op_count

    -

    Array of select CIFS operation counts

    +

    node_nfs_commit_total

    +

    Total number of Commit procedure requests. It is the total number of Commit success and Commit error requests.

    @@ -36072,20 +40348,56 @@

    node_cifs_op_countUnit: none
    Type: rate
    Base: -

    + + + + + + + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + + + + + + + +
    conf/restperf/9.12.0/cifs_node.yamlapi/cluster/counter/tables/svm_nfs_v3:nodecommit.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodecommit.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodecommit.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodecommit.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances cifs:nodecifs_op_count
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_node.yamlperf-object-get-instances nfsv3:nodecommit_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodecommit_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodecommit_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodecommit_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_cifs_open_files

    -

    Number of open files over SMB and SMB2

    +

    node_nfs_create_avg_latency

    +

    Average latency of Create procedure requests. The counter keeps track of the average response time of Create requests.

    @@ -36098,72 +40410,56 @@

    node_cifs_open_filesUnit: none
    Type: raw
    Base: -

    + + + - - - - + + + + - -
    conf/restperf/9.12.0/cifs_node.yamlapi/cluster/counter/tables/svm_nfs_v3:nodecreate.average_latency
    Unit: microsec
    Type: average
    Base: create.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances cifs:nodeopen_files
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_node.yamlRESTapi/cluster/counter/tables/svm_nfs_v41:nodecreate.average_latency
    Unit: microsec
    Type: average
    Base: create.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    -

    node_cifs_ops

    -

    Number of CIFS operations per second

    - - - - - - + + + + - - - - - + + + - - - + + + - -
    APIEndpointMetricTemplateRESTapi/cluster/counter/tables/svm_nfs_v42:nodecreate.average_latency
    Unit: microsec
    Type: average
    Base: create.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/system:nodecifs_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/svm_nfs_v4:nodecreate.average_latency
    Unit: microsec
    Type: average
    Base: create.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances system:nodecifs_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nfsv3:nodecreate_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_cifs_read_latency

    -

    Average latency for CIFS read operations

    - - - - - - + + + + - - - - - - + + + + - - - + + +
    APIEndpointMetricTemplateZAPIperf-object-get-instances nfsv4_1:nodecreate_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_cifs:nodeaverage_read_latency
    Unit: microsec
    Type: average
    Base: total_read_ops
    conf/restperf/9.12.0/cifs_node.yamlZAPIperf-object-get-instances nfsv4_2:nodecreate_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances cifs:nodecifs_read_latency
    Unit: microsec
    Type: average
    Base: cifs_read_ops
    conf/zapiperf/cdot/9.8.0/cifs_node.yamlperf-object-get-instances nfsv4:nodecreate_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_cifs_read_ops

    -

    Total number of CIFS read operations

    +

    node_nfs_create_session_avg_latency

    +

    Average latency of CREATE_SESSION operations.

    @@ -36176,20 +40472,32 @@

    node_cifs_read_opsUnit: per_sec
    Type: rate
    Base: -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/cifs_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodecreate_session.average_latency
    Unit: microsec
    Type: average
    Base: create_session.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodecreate_session.average_latency
    Unit: microsec
    Type: average
    Base: create_session.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances cifs:nodecifs_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_node.yamlperf-object-get-instances nfsv4_1:nodecreate_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_session_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodecreate_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_session_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_cifs_total_ops

    -

    Total number of CIFS operations

    +

    node_nfs_create_session_total

    +

    Total number of CREATE_SESSION operations.

    @@ -36202,20 +40510,32 @@

    node_cifs_total_opsUnit: per_sec
    Type: rate
    Base: -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/cifs_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodecreate_session.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodecreate_session.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances cifs:nodecifs_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_node.yamlperf-object-get-instances nfsv4_1:nodecreate_session_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodecreate_session_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_cifs_write_latency

    -

    Average latency for CIFS write operations

    +

    node_nfs_create_total

    +

Total number of Create procedure requests. It is the total number of create success and create error requests.

    @@ -36228,72 +40548,56 @@

    node_cifs_write_latency

    - - - + + + - - - - + + + + - -
    RESTapi/cluster/counter/tables/svm_cifs:nodeaverage_write_latency
    Unit: microsec
    Type: average
    Base: total_write_ops
    conf/restperf/9.12.0/cifs_node.yamlapi/cluster/counter/tables/svm_nfs_v3:nodecreate.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances cifs:nodecifs_write_latency
    Unit: microsec
    Type: average
    Base: cifs_write_ops
    conf/zapiperf/cdot/9.8.0/cifs_node.yamlRESTapi/cluster/counter/tables/svm_nfs_v41:nodecreate.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    -

    node_cifs_write_ops

    -

    Total number of CIFS write operations

    - - - - - - + + + + - - - - - + + + - - - + + + - -
    APIEndpointMetricTemplateRESTapi/cluster/counter/tables/svm_nfs_v42:nodecreate.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_cifs:nodetotal_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/cifs_node.yamlapi/cluster/counter/tables/svm_nfs_v4:nodecreate.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances cifs:nodecifs_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_node.yamlperf-object-get-instances nfsv3:nodecreate_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_cpu_busy

    -

System CPU resource utilization. Returns a computed percentage for the default CPU field. It is a 'CPU usage summary' value that indicates how busy the system is, based on the most heavily utilized domain. The intent is to estimate how much CPU headroom remains before either a single domain maxes out or all available idle CPU cycles are exhausted, whichever occurs first.
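
Percent-type counters such as cpu_busy are normalized against the cpu_elapsed_time base counter: one hundred times the delta of the busy counter divided by the delta of elapsed time. A minimal illustration with invented raw values:

```python
# Two polls of the raw counters behind node_cpu_busy (values are illustrative only).
prev = {"cpu_busy": 52_000_000, "cpu_elapsed_time": 600_000_000}
curr = {"cpu_busy": 58_000_000, "cpu_elapsed_time": 660_000_000}

delta_busy = curr["cpu_busy"] - prev["cpu_busy"]
delta_elapsed = curr["cpu_elapsed_time"] - prev["cpu_elapsed_time"]

# Percent-type: 100 * delta(counter) / delta(base counter).
cpu_busy_percent = 100.0 * delta_busy / delta_elapsed if delta_elapsed else 0.0
print(f"node_cpu_busy ~ {cpu_busy_percent:.1f}%")
```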

    - - - - - - + + + + - - - - - - + + + + - - - + + +
    APIEndpointMetricTemplateZAPIperf-object-get-instances nfsv4_1:nodecreate_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/system:nodecpu_busy
    Unit: percent
    Type: percent
    Base: cpu_elapsed_time
    conf/restperf/9.12.0/system_node.yamlZAPIperf-object-get-instances nfsv4_2:nodecreate_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances system:nodecpu_busy
    Unit: percent
    Type: percent
    Base: cpu_elapsed_time
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nfsv4:nodecreate_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_cpu_busytime

    -

    The time (in hundredths of a second) that the CPU has been doing useful work since the last boot

    +

    node_nfs_delegpurge_avg_latency

    +

    Average latency of DELEGPURGE operations.

    @@ -36306,20 +40610,44 @@

    node_cpu_busytimeUnit: microsec
    Type: average
    Base: delegpurge.total +

    + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodedelegpurge.average_latency
    Unit: microsec
    Type: average
    Base: delegpurge.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodedelegpurge.average_latency
    Unit: microsec
    Type: average
    Base: delegpurge.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIsystem-node-get-iternode-details-info.cpu-busytimeconf/zapi/cdot/9.8.0/node.yamlperf-object-get-instances nfsv4_1:nodedelegpurge_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegpurge_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodedelegpurge_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegpurge_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodedelegpurge_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegpurge_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_cpu_domain_busy

    -

Array of processor time percentages spent in various domains

    +

    node_nfs_delegpurge_total

    +

    Total number of DELEGPURGE operations.

    @@ -36332,20 +40660,44 @@

    node_cpu_domain_busyUnit: percent
    Type: percent
    Base: cpu_elapsed_time -

    + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodedelegpurge.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodedelegpurge.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodedelegpurge.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances system:nodedomain_busy
    Unit: percent
    Type: percent
    Base: cpu_elapsed_time
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nfsv4_1:nodedelegpurge_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodedelegpurge_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodedelegpurge_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_cpu_elapsed_time

    -

    Elapsed time since boot

    +

    node_nfs_delegreturn_avg_latency

    +

    Average latency of DELEGRETURN operations.

    @@ -36358,20 +40710,44 @@

    node_cpu_elapsed_timeUnit: microsec
    Type: delta
    Base: -

    + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodedelegreturn.average_latency
    Unit: microsec
    Type: average
    Base: delegreturn.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodedelegreturn.average_latency
    Unit: microsec
    Type: average
    Base: delegreturn.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodedelegreturn.average_latency
    Unit: microsec
    Type: average
    Base: delegreturn.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances system:nodecpu_elapsed_time
    Unit: none
    Type: delta,no-display
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nfsv4_1:nodedelegreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegreturn_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodedelegreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegreturn_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodedelegreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegreturn_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_disk_busy

    -

    The utilization percent of the disk. node_disk_busy is disk_busy aggregated by node.

    +

    node_nfs_delegreturn_total

    +

    Total number of DELEGRETURN operations.

    @@ -36384,20 +40760,44 @@

    node_disk_busyUnit: percent
    Type: percent
    Base: base_for_disk_busy -

    + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodedelegreturn.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodedelegreturn.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodedelegreturn.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances disk:constituentdisk_busy
    Unit: percent
    Type: percent
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodedelegreturn_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodedelegreturn_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodedelegreturn_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_disk_capacity

    -

    Disk capacity in MB. node_disk_capacity is disk_capacity aggregated by node.

    +

    node_nfs_destroy_clientid_avg_latency

    +

    Average latency of DESTROY_CLIENTID operations.

    @@ -36410,20 +40810,32 @@

    node_disk_capacityUnit: mb
    Type: raw
    Base: -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodedestroy_clientid.average_latency
    Unit: microsec
    Type: average
    Base: destroy_clientid.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodedestroy_clientid.average_latency
    Unit: microsec
    Type: average
    Base: destroy_clientid.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentdisk_capacity
    Unit: mb
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodedestroy_clientid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: destroy_clientid_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodedestroy_clientid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: destroy_clientid_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_cp_read_chain

    -

    Average number of blocks transferred in each consistency point read operation during a CP. node_disk_cp_read_chain is disk_cp_read_chain aggregated by node.

    +

    node_nfs_destroy_clientid_total

    +

    Total number of DESTROY_CLIENTID operations.

    @@ -36436,20 +40848,32 @@

    node_disk_cp_read_chain

    - - - + + + + + + + + + - - - + + + + + + + + +
    RESTapi/cluster/counter/tables/disk:constituentcp_read_chain
    Unit: none
    Type: average
    Base: cp_read_count
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodedestroy_clientid.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodedestroy_clientid.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentcp_read_chain
    Unit: none
    Type: average
    Base: cp_reads
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodedestroy_clientid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodedestroy_clientid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_cp_read_latency

    -

    Average latency per block in microseconds for consistency point read operations. node_disk_cp_read_latency is disk_cp_read_latency aggregated by node.

    +

    node_nfs_destroy_session_avg_latency

    +

    Average latency of DESTROY_SESSION operations.

    @@ -36462,20 +40886,32 @@

    node_disk_cp_read_latencyUnit: microsec
    Type: average
    Base: cp_read_blocks -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodedestroy_session.average_latency
    Unit: microsec
    Type: average
    Base: destroy_session.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodedestroy_session.average_latency
    Unit: microsec
    Type: average
    Base: destroy_session.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentcp_read_latency
    Unit: microsec
    Type: average
    Base: cp_read_blocks
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodedestroy_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: destroy_session_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodedestroy_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: destroy_session_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_cp_reads

    -

    Number of disk read operations initiated each second for consistency point processing. node_disk_cp_reads is disk_cp_reads aggregated by node.

    +

    node_nfs_destroy_session_total

    +

    Total number of DESTROY_SESSION operations.

    @@ -36488,20 +40924,32 @@

    node_disk_cp_readsUnit: per_sec
    Type: rate
    Base: -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodedestroy_session.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodedestroy_session.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentcp_reads
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodedestroy_session_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodedestroy_session_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_data_read

    -

    Number of disk kilobytes (KB) read per second

    +

    node_nfs_exchange_id_avg_latency

    +

    Average latency of EXCHANGE_ID operations.

    @@ -36514,20 +40962,32 @@

    node_disk_data_readUnit: kb_per_sec
    Type: rate
    Base: -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodeexchange_id.average_latency
    Unit: microsec
    Type: average
    Base: exchange_id.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeexchange_id.average_latency
    Unit: microsec
    Type: average
    Base: exchange_id.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances system:nodedisk_data_read
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nfsv4_1:nodeexchange_id_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: exchange_id_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeexchange_id_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: exchange_id_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_data_written

    -

    Number of disk kilobytes (KB) written per second

    +

    node_nfs_exchange_id_total

    +

    Total number of EXCHANGE_ID operations.

    @@ -36540,20 +41000,32 @@

    node_disk_data_written

    - - - + + + + + + + + + - - - + + + + + + + + +
    RESTapi/cluster/counter/tables/system:nodedisk_data_written
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodeexchange_id.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeexchange_id.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances system:nodedisk_data_written
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nfsv4_1:nodeexchange_id_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeexchange_id_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_io_pending

    -

    Average number of I/Os issued to the disk for which we have not yet received the response. node_disk_io_pending is disk_io_pending aggregated by node.

    +

    node_nfs_free_stateid_avg_latency

    +

    Average latency of FREE_STATEID operations.

    @@ -36566,20 +41038,32 @@

    node_disk_io_pendingUnit: none
    Type: average
    Base: base_for_disk_busy -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodefree_stateid.average_latency
    Unit: microsec
    Type: average
    Base: free_stateid.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodefree_stateid.average_latency
    Unit: microsec
    Type: average
    Base: free_stateid.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentio_pending
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodefree_stateid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: free_stateid_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodefree_stateid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: free_stateid_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_io_queued

    -

    Number of I/Os queued to the disk but not yet issued. node_disk_io_queued is disk_io_queued aggregated by node.

    +

    node_nfs_free_stateid_total

    +

    Total number of FREE_STATEID operations.

    @@ -36592,20 +41076,32 @@

    node_disk_io_queuedUnit: none
    Type: average
    Base: base_for_disk_busy -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodefree_stateid.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodefree_stateid.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentio_queued
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodefree_stateid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodefree_stateid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_max_busy

    -

    The utilization percent of the disk. node_disk_max_busy is the maximum of disk_busy for label node.

    +

    node_nfs_fsinfo_avg_latency

    +

    Average latency of FSInfo procedure requests. The counter keeps track of the average response time of FSInfo requests.

    @@ -36618,20 +41114,20 @@

    node_disk_max_busyUnit: percent
    Type: percent
    Base: base_for_disk_busy -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v3:nodefsinfo.average_latency
    Unit: microsec
    Type: average
    Base: fsinfo.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances disk:constituentdisk_busy
    Unit: percent
    Type: percent
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv3:nodefsinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: fsinfo_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_disk_max_capacity

    -

    Disk capacity in MB. node_disk_max_capacity is the maximum of disk_capacity for label node.

    +

    node_nfs_fsinfo_total

    +

Total number of FSInfo procedure requests. It is the total number of FSInfo success and FSInfo error requests.

    @@ -36644,20 +41140,20 @@

    node_disk_max_capacity

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/disk:constituentcapacity
    Unit: mb
    Type: raw
    Base:
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v3:nodefsinfo.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances disk:constituentdisk_capacity
    Unit: mb
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv3:nodefsinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_disk_max_cp_read_chain

    -

    Average number of blocks transferred in each consistency point read operation during a CP. node_disk_max_cp_read_chain is the maximum of disk_cp_read_chain for label node.

    +

    node_nfs_fsstat_avg_latency

    +

    Average latency of FSStat procedure requests. The counter keeps track of the average response time of FSStat requests.

    @@ -36670,20 +41166,20 @@

    node_disk_max_cp_read_chainUnit: none
    Type: average
    Base: cp_read_count -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v3:nodefsstat.average_latency
    Unit: microsec
    Type: average
    Base: fsstat.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances disk:constituentcp_read_chain
    Unit: none
    Type: average
    Base: cp_reads
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv3:nodefsstat_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: fsstat_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_disk_max_cp_read_latency

    -

    Average latency per block in microseconds for consistency point read operations. node_disk_max_cp_read_latency is the maximum of disk_cp_read_latency for label node.

    +

    node_nfs_fsstat_total

    +

Total number of FSStat procedure requests. It is the total number of FSStat success and FSStat error requests.

    @@ -36696,20 +41192,20 @@

    node_disk_max_cp_read_latencyUnit: microsec
    Type: average
    Base: cp_read_blocks -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v3:nodefsstat.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances disk:constituentcp_read_latency
    Unit: microsec
    Type: average
    Base: cp_read_blocks
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv3:nodefsstat_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_disk_max_cp_reads

    -

    Number of disk read operations initiated each second for consistency point processing. node_disk_max_cp_reads is the maximum of disk_cp_reads for label node.

    +

    node_nfs_get_dir_delegation_avg_latency

    +

    Average latency of GET_DIR_DELEGATION operations.

    @@ -36722,20 +41218,32 @@

    node_disk_max_cp_reads

    - - - + + + + + + + + + - - - + + + + + + + + +
    RESTapi/cluster/counter/tables/disk:constituentcp_read_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodeget_dir_delegation.average_latency
    Unit: microsec
    Type: average
    Base: get_dir_delegation.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeget_dir_delegation.average_latency
    Unit: microsec
    Type: average
    Base: get_dir_delegation.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentcp_reads
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodeget_dir_delegation_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: get_dir_delegation_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeget_dir_delegation_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: get_dir_delegation_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_max_io_pending

    -

    Average number of I/Os issued to the disk for which we have not yet received the response. node_disk_max_io_pending is the maximum of disk_io_pending for label node.

    +

    node_nfs_get_dir_delegation_total

    +

    Total number of GET_DIR_DELEGATION operations.

    @@ -36748,20 +41256,32 @@

    node_disk_max_io_pending

    - - - + + + + + + + + + - - - + + + + + + + + +
    RESTapi/cluster/counter/tables/disk:constituentio_pending
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodeget_dir_delegation.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeget_dir_delegation.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentio_pending
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodeget_dir_delegation_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeget_dir_delegation_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_max_io_queued

    -

    Number of I/Os queued to the disk but not yet issued. node_disk_max_io_queued is the maximum of disk_io_queued for label node.

    +

    node_nfs_getattr_avg_latency

    +

    Average latency of GetAttr procedure requests. This counter keeps track of the average response time of GetAttr requests.
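
This latency is collected separately for each protocol version (the v3, v4, v4.1, and v4.2 tables that follow). If a single GetAttr latency across protocols is wanted, the per-version averages should be weighted by their getattr totals rather than averaged directly. A small sketch under that assumption, with invented per-version numbers:

```python
# Per-protocol-version GetAttr latency (microsec) and totals for one node; numbers are invented.
samples = {
    "nfsv3":   {"avg_latency": 180.0, "total": 50_000},
    "nfsv4":   {"avg_latency": 240.0, "total": 8_000},
    "nfsv4_1": {"avg_latency": 210.0, "total": 12_000},
    "nfsv4_2": {"avg_latency": 260.0, "total": 2_000},
}

total_ops = sum(s["total"] for s in samples.values())
# Weight each version's average latency by its share of the operations.
combined = (
    sum(s["avg_latency"] * s["total"] for s in samples.values()) / total_ops
    if total_ops else 0.0
)
print(f"combined getattr latency ~ {combined:.1f} microsec over {total_ops} ops")
```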

    @@ -36774,46 +41294,56 @@

    node_disk_max_io_queued

    - - - + + + - - - - + + + + - -
    RESTapi/cluster/counter/tables/disk:constituentio_queued
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v3:nodegetattr.average_latency
    Unit: microsec
    Type: average
    Base: getattr.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances disk:constituentio_queued
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yamlRESTapi/cluster/counter/tables/svm_nfs_v41:nodegetattr.average_latency
    Unit: microsec
    Type: average
    Base: getattr.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    -

    node_disk_max_total_data

    -

    Total throughput for user operations per second. node_disk_max_total_data is the maximum of disk_total_data for label node.

    - - - - - - + + + + - - - - - + + + - - - + + + + + + + + + + + + + + + + + + + + +
    APIEndpointMetricTemplateRESTapi/cluster/counter/tables/svm_nfs_v42:nodegetattr.average_latency
    Unit: microsec
    Type: average
    Base: getattr.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/disk:constituenttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v4:nodegetattr.average_latency
    Unit: microsec
    Type: average
    Base: getattr.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances disk:constituenttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv3:nodegetattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getattr_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodegetattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodegetattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getattr_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodegetattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_disk_max_total_transfers

    -

    Total number of disk operations involving data transfer initiated per second. node_disk_max_total_transfers is the maximum of disk_total_transfers for label node.

    +

    node_nfs_getattr_total

    +

    Total number of Getattr procedure requests. It is the total number of getattr success and getattr error requests.
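    Counters listed with Type: rate (such as getattr.total) are monotonically increasing totals; the reported value is the counter delta divided by the elapsed time between two polls. A short sketch under that assumption, with hypothetical numbers:

        def per_second_rate(prev_value, curr_value, interval_seconds):
            """Per-second rate from two samples of a monotonically increasing counter."""
            if interval_seconds <= 0:
                return 0.0
            return (curr_value - prev_value) / interval_seconds

        # 5_000 -> 8_000 GetAttr calls over a 60-second poll interval -> 50 ops/sec
        print(per_second_rate(5_000, 8_000, 60))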

    @@ -36826,20 +41356,56 @@

    node_disk_max_total_transfers
    Unit: per_sec
    Type: rate
    Base: -

    + + + + + + + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v3:nodegetattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodegetattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodegetattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodegetattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances disk:constituenttotal_transfers
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv3:nodegetattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodegetattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodegetattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodegetattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_disk_max_user_read_blocks

    -

    Number of blocks transferred for user read operations per second. node_disk_max_user_read_blocks is the maximum of disk_user_read_blocks for label node.

    +

    node_nfs_getdeviceinfo_avg_latency

    +

    Average latency of GETDEVICEINFO operations.

    @@ -36852,20 +41418,32 @@

    node_disk_max_user_read_blocks
    Unit: per_sec
    Type: rate
    Base: -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodegetdeviceinfo.average_latency
    Unit: microsec
    Type: average
    Base: getdeviceinfo.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodegetdeviceinfo.average_latency
    Unit: microsec
    Type: average
    Base: getdeviceinfo.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_read_blocks
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodegetdeviceinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getdeviceinfo_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodegetdeviceinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getdeviceinfo_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_max_user_read_chain

    -

    Average number of blocks transferred in each user read operation. node_disk_max_user_read_chain is the maximum of disk_user_read_chain for label node.

    +

    node_nfs_getdeviceinfo_total

    +

    Total number of GETDEVICEINFO operations.

    @@ -36878,20 +41456,32 @@

    node_disk_max_user_read_chain
    Unit: none
    Type: average
    Base: user_read_count -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodegetdeviceinfo.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodegetdeviceinfo.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_read_chain
    Unit: none
    Type: average
    Base: user_reads
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodegetdeviceinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodegetdeviceinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_max_user_read_latency

    -

    Average latency per block in microseconds for user read operations. node_disk_max_user_read_latency is the maximum of disk_user_read_latency for label node.

    +

    node_nfs_getdevicelist_avg_latency

    +

    Average latency of GETDEVICELIST operations.

    @@ -36904,20 +41494,32 @@

    node_disk_max_user_read_latency

    - - - + + + + + + + + + - - - + + + + + + + + +
    RESTapi/cluster/counter/tables/disk:constituentuser_read_latency
    Unit: microsec
    Type: average
    Base: user_read_block_count
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodegetdevicelist.average_latency
    Unit: microsec
    Type: average
    Base: getdevicelist.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodegetdevicelist.average_latency
    Unit: microsec
    Type: average
    Base: getdevicelist.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_read_latency
    Unit: microsec
    Type: average
    Base: user_read_blocks
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodegetdevicelist_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getdevicelist_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodegetdevicelist_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getdevicelist_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_max_user_reads

    -

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. node_disk_max_user_reads is the maximum of disk_user_reads for label node.

    +

    node_nfs_getdevicelist_total

    +

    Total number of GETDEVICELIST operations.

    @@ -36930,20 +41532,32 @@

    node_disk_max_user_reads

    - - - + + + + + + + + + - - - + + + + + + + + +
    RESTapi/cluster/counter/tables/disk:constituentuser_read_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodegetdevicelist.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodegetdevicelist.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_reads
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodegetdevicelist_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodegetdevicelist_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_max_user_write_blocks

    -

    Number of blocks transferred for user write operations per second. node_disk_max_user_write_blocks is the maximum of disk_user_write_blocks for label node.

    +

    node_nfs_getfh_avg_latency

    +

    Average latency of GETFH operations.

    @@ -36956,20 +41570,44 @@

    node_disk_max_user_write_blocks

    - - - + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
    RESTapi/cluster/counter/tables/disk:constituentuser_write_block_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodegetfh.average_latency
    Unit: microsec
    Type: average
    Base: getfh.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodegetfh.average_latency
    Unit: microsec
    Type: average
    Base: getfh.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodegetfh.average_latency
    Unit: microsec
    Type: average
    Base: getfh.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_write_blocks
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodegetfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodegetfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getfh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodegetfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_disk_max_user_write_chain

    -

    Average number of blocks transferred in each user write operation. node_disk_max_user_write_chain is the maximum of disk_user_write_chain for label node.

    +

    node_nfs_getfh_total

    +

    Total number of GETFH operations.

    @@ -36982,20 +41620,44 @@

    node_disk_max_user_write_chain
    Unit: none
    Type: average
    Base: user_write_count -

    + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodegetfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodegetfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodegetfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_write_chain
    Unit: none
    Type: average
    Base: user_writes
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodegetfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodegetfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodegetfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_disk_max_user_write_latency

    -

    Average latency per block in microseconds for user write operations. node_disk_max_user_write_latency is the maximum of disk_user_write_latency for label node.

    +

    node_nfs_latency

    +

    Average latency of NFSv3 requests. This counter keeps track of the average response time of NFSv3 requests.

    @@ -37008,72 +41670,56 @@

    node_disk_max_user_write_latency

    - - - + + + - - - - + + + + - -
    RESTapi/cluster/counter/tables/disk:constituentuser_write_latency
    Unit: microsec
    Type: average
    Base: user_write_block_count
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v3:nodelatency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_write_latency
    Unit: microsec
    Type: average
    Base: user_write_blocks
    conf/zapiperf/cdot/9.8.0/disk.yamlRESTapi/cluster/counter/tables/svm_nfs_v41:nodelatency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    -

    node_disk_max_user_writes

    -

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. node_disk_max_user_writes is the maximum of disk_user_writes for label node.

    - - - - - - + + + + - - - - - + + + - - - + + + - -
    APIEndpointMetricTemplateRESTapi/cluster/counter/tables/svm_nfs_v42:nodelatency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/disk:constituentuser_write_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v4:nodelatency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_writes
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv3:nodelatency
    Unit: microsec
    Type: average,no-zero-values
    Base: total_ops
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_disk_total_data

    -

    Total throughput for user operations per second. node_disk_total_data is disk_total_data aggregated by node.
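    Here "aggregated by node" is effectively a sum: the per-disk values that share a node label are added together, in contrast to the node_disk_max_* series, which keep the per-node maximum. A minimal sketch of that aggregation (the sample rows and sum_by_node helper are hypothetical):

        from collections import defaultdict

        # Hypothetical per-disk throughput samples of disk_total_data, in b_per_sec.
        samples = [
            {"node": "node-01", "disk": "1.0.0", "disk_total_data": 1200.0},
            {"node": "node-01", "disk": "1.0.1", "disk_total_data": 800.0},
            {"node": "node-02", "disk": "1.0.2", "disk_total_data": 500.0},
        ]

        def sum_by_node(rows, value_key):
            """Sum value_key across every disk that belongs to the same node."""
            out = defaultdict(float)
            for row in rows:
                out[row["node"]] += row[value_key]
            return dict(out)

        # node_disk_total_data -> {'node-01': 2000.0, 'node-02': 500.0}
        print(sum_by_node(samples, "disk_total_data"))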

    - - - - - - + + + + - - - - - - + + + + - - - + + +
    APIEndpointMetricTemplateZAPIperf-object-get-instances nfsv4_1:nodelatency
    Unit: microsec
    Type: average,no-zero-values
    Base: total_ops
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/disk:constituenttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yamlZAPIperf-object-get-instances nfsv4_2:nodelatency
    Unit: microsec
    Type: average,no-zero-values
    Base: total_ops
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituenttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4:nodelatency
    Unit: microsec
    Type: average,no-zero-values
    Base: total_ops
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_disk_total_transfers

    -

    Total number of disk operations involving data transfer initiated per second. node_disk_total_transfers is disk_total_transfers aggregated by node.

    +

    node_nfs_layoutcommit_avg_latency

    +

    Average latency of LAYOUTCOMMIT operations.

    @@ -37086,20 +41732,32 @@

    node_disk_total_transfers
    Unit: per_sec
    Type: rate
    Base: -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodelayoutcommit.average_latency
    Unit: microsec
    Type: average
    Base: layoutcommit.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodelayoutcommit.average_latency
    Unit: microsec
    Type: average
    Base: layoutcommit.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituenttotal_transfers
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodelayoutcommit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutcommit_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodelayoutcommit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutcommit_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_user_read_blocks

    -

    Number of blocks transferred for user read operations per second. node_disk_user_read_blocks is disk_user_read_blocks aggregated by node.

    +

    node_nfs_layoutcommit_total

    +

    Total number of LAYOUTCOMMIT operations.

    @@ -37112,20 +41770,32 @@

    node_disk_user_read_blocks
    Unit: per_sec
    Type: rate
    Base: -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodelayoutcommit.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodelayoutcommit.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_read_blocks
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodelayoutcommit_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodelayoutcommit_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_user_read_chain

    -

    Average number of blocks transferred in each user read operation. node_disk_user_read_chain is disk_user_read_chain aggregated by node.

    +

    node_nfs_layoutget_avg_latency

    +

    Average latency of LAYOUTGET operations.

    @@ -37138,20 +41808,32 @@

    node_disk_user_read_chain
    Unit: none
    Type: average
    Base: user_read_count -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodelayoutget.average_latency
    Unit: microsec
    Type: average
    Base: layoutget.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodelayoutget.average_latency
    Unit: microsec
    Type: average
    Base: layoutget.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_read_chain
    Unit: none
    Type: average
    Base: user_reads
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodelayoutget_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutget_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodelayoutget_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutget_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_user_read_latency

    -

    Average latency per block in microseconds for user read operations. node_disk_user_read_latency is disk_user_read_latency aggregated by node.

    +

    node_nfs_layoutget_total

    +

    Total number of LAYOUTGET operations.

    @@ -37164,20 +41846,32 @@

    node_disk_user_read_latency
    Unit: microsec
    Type: average
    Base: user_read_block_count -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodelayoutget.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodelayoutget.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_read_latency
    Unit: microsec
    Type: average
    Base: user_read_blocks
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodelayoutget_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodelayoutget_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_user_reads

    -

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. node_disk_user_reads is disk_user_reads aggregated by node.

    +

    node_nfs_layoutreturn_avg_latency

    +

    Average latency of LAYOUTRETURN operations.

    @@ -37190,20 +41884,32 @@

    node_disk_user_reads
    Unit: per_sec
    Type: rate
    Base: -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodelayoutreturn.average_latency
    Unit: microsec
    Type: average
    Base: layoutreturn.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodelayoutreturn.average_latency
    Unit: microsec
    Type: average
    Base: layoutreturn.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_reads
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodelayoutreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutreturn_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodelayoutreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutreturn_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_user_write_blocks

    -

    Number of blocks transferred for user write operations per second. node_disk_user_write_blocks is disk_user_write_blocks aggregated by node.

    +

    node_nfs_layoutreturn_total

    +

    Total number of LAYOUTRETURN operations.

    @@ -37216,20 +41922,32 @@

    node_disk_user_write_blocks
    Unit: per_sec
    Type: rate
    Base: -

    + + + + + + + + + - - - + + + + + + + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v41:nodelayoutreturn.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodelayoutreturn.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_write_blocks
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4_1:nodelayoutreturn_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodelayoutreturn_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_disk_user_write_chain

    -

    Average number of blocks transferred in each user write operation. node_disk_user_write_chain is disk_user_write_chain aggregated by node.

    + +

    Average latency of Link procedure requests. The counter keeps track of the average response time of Link requests.

    @@ -37242,72 +41960,56 @@

    node_disk_user_write_chain
    Unit: none
    Type: average
    Base: user_write_count -

    + + + - - - - + + + + - -
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v3:nodelink.average_latency
    Unit: microsec
    Type: average
    Base: link.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_write_chain
    Unit: none
    Type: average
    Base: user_writes
    conf/zapiperf/cdot/9.8.0/disk.yamlRESTapi/cluster/counter/tables/svm_nfs_v41:nodelink.average_latency
    Unit: microsec
    Type: average
    Base: link.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    -

    node_disk_user_write_latency

    -

    Average latency per block in microseconds for user write operations. node_disk_user_write_latency is disk_user_write_latency aggregated by node.

    - - - - - - + + + + - - - - - + + + - - - + + + - -
    APIEndpointMetricTemplateRESTapi/cluster/counter/tables/svm_nfs_v42:nodelink.average_latency
    Unit: microsec
    Type: average
    Base: link.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/disk:constituentuser_write_latency
    Unit: microsec
    Type: average
    Base: user_write_block_count
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/svm_nfs_v4:nodelink.average_latency
    Unit: microsec
    Type: average
    Base: link.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_write_latency
    Unit: microsec
    Type: average
    Base: user_write_blocks
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv3:nodelink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: link_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_disk_user_writes

    -

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. node_disk_user_writes is disk_user_writes aggregated by node.

    - - - - - - + + + + - - - - - - + + + + - - - + + +
    APIEndpointMetricTemplateZAPIperf-object-get-instances nfsv4_1:nodelink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: link_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/disk:constituentuser_write_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yamlZAPIperf-object-get-instances nfsv4_2:nodelink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: link_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances disk:constituentuser_writes
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances nfsv4:nodelink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: link_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_failed_fan

    -

    The number of chassis fans that are not operating within the recommended RPM range.

    + +

    Total number of Link procedure requests. It is the total number of Link success and Link error requests.

    @@ -37320,72 +42022,56 @@

    node_failed_fan
    Unit: none
    Type: rate
    Base: +

    - - - - + + + + - -
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIsystem-node-get-iternode-details-info.env-failed-fan-countconf/zapi/cdot/9.8.0/node.yamlRESTapi/cluster/counter/tables/svm_nfs_v41:nodelink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    -

    node_failed_power

    -

    Number of failed power supply units.

    - - - - - - + + + + - - - - - + + + - - - + + + - -
    APIEndpointMetricTemplateRESTapi/cluster/counter/tables/svm_nfs_v42:nodelink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/nodescontroller.failed_power_supply.countconf/rest/9.12.0/node.yamlapi/cluster/counter/tables/svm_nfs_v4:nodelink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIsystem-node-get-iternode-details-info.env-failed-power-supply-countconf/zapi/cdot/9.8.0/node.yamlperf-object-get-instances nfsv3:nodelink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_fcp_data_recv

    -

    Number of FCP kilobytes (KB) received per second

    - - - - - - + + + + - - - - - - + + + + - - - + + +
    APIEndpointMetricTemplateZAPIperf-object-get-instances nfsv4_1:nodelink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/system:nodefcp_data_received
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yamlZAPIperf-object-get-instances nfsv4_2:nodelink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances system:nodefcp_data_recv
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nfsv4:nodelink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_fcp_data_sent

    -

    Number of FCP kilobytes (KB) sent per second

    +

    node_nfs_lock_avg_latency

    +

    Average latency of LOCK operations.

    @@ -37398,20 +42084,44 @@

    node_fcp_data_sent
    Unit: kb_per_sec
    Type: rate
    Base: -

    + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodelock.average_latency
    Unit: microsec
    Type: average
    Base: lock.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodelock.average_latency
    Unit: microsec
    Type: average
    Base: lock.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodelock.average_latency
    Unit: microsec
    Type: average
    Base: lock.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances system:nodefcp_data_sent
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nfsv4_1:nodelock_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lock_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodelock_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lock_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodelock_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lock_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_fcp_ops

    -

    Number of FCP operations per second

    +

    node_nfs_lock_total

    +

    Total number of LOCK operations.

    @@ -37424,20 +42134,44 @@

    node_fcp_ops

    - - - + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
    RESTapi/cluster/counter/tables/system:nodefcp_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodelock.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodelock.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodelock.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances system:nodefcp_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nfsv4_1:nodelock_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodelock_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodelock_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_hdd_data_read

    -

    Number of HDD kilobytes (KB) read per second

    +

    node_nfs_lockt_avg_latency

    +

    Average latency of LOCKT operations.

    @@ -37450,20 +42184,44 @@

    node_hdd_data_read
    Unit: kb_per_sec
    Type: rate
    Base: -

    + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodelockt.average_latency
    Unit: microsec
    Type: average
    Base: lockt.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodelockt.average_latency
    Unit: microsec
    Type: average
    Base: lockt.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodelockt.average_latency
    Unit: microsec
    Type: average
    Base: lockt.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances system:nodehdd_data_read
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nfsv4_1:nodelockt_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lockt_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodelockt_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lockt_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodelockt_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lockt_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_hdd_data_written

    -

    Number of HDD kilobytes (KB) written per second

    +

    node_nfs_lockt_total

    +

    Total number of LOCKT operations.

    @@ -37476,46 +42234,44 @@

    node_hdd_data_written
    Unit: kb_per_sec
    Type: rate
    Base: -

    + + + - - - - + + + + - -
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodelockt.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances system:nodehdd_data_written
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlRESTapi/cluster/counter/tables/svm_nfs_v42:nodelockt.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    -

    node_iscsi_ops

    -

    Number of iSCSI operations per second

    - - - - - - + + + + - - - - - - + + + + - - - + + + + + + + + +
    APIEndpointMetricTemplateRESTapi/cluster/counter/tables/svm_nfs_v4:nodelockt.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    RESTapi/cluster/counter/tables/system:nodeiscsi_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yamlZAPIperf-object-get-instances nfsv4_1:nodelockt_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances system:nodeiscsi_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nfsv4_2:nodelockt_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodelockt_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_memory

    -

    Total memory in megabytes (MB)

    +

    node_nfs_locku_avg_latency

    +

    Average latency of LOCKU operations.

    @@ -37528,46 +42284,44 @@

    node_memory

    - - - + + + - - - - + + + + - -
    RESTapi/cluster/counter/tables/system:nodememory
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodelocku.average_latency
    Unit: microsec
    Type: average
    Base: locku.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances system:nodememory
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlRESTapi/cluster/counter/tables/svm_nfs_v42:nodelocku.average_latency
    Unit: microsec
    Type: average
    Base: locku.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    -

    node_net_data_recv

    -

    Number of network kilobytes (KB) received per second

    - - - - - - + + + + - - - - - - + + + + - - - + + + + + + + + +
    APIEndpointMetricTemplateRESTapi/cluster/counter/tables/svm_nfs_v4:nodelocku.average_latency
    Unit: microsec
    Type: average
    Base: locku.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    RESTapi/cluster/counter/tables/system:nodenetwork_data_received
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yamlZAPIperf-object-get-instances nfsv4_1:nodelocku_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: locku_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances system:nodenet_data_recv
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nfsv4_2:nodelocku_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: locku_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodelocku_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: locku_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_net_data_sent

    -

    Number of network kilobytes (KB) sent per second

    +

    node_nfs_locku_total

    +

    Total number of LOCKU operations.

    @@ -37580,20 +42334,44 @@

    node_net_data_sent
    Unit: kb_per_sec
    Type: rate
    Base: -

    + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodelocku.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodelocku.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodelocku.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances system:nodenet_data_sent
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nfsv4_1:nodelocku_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodelocku_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodelocku_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_access_avg_latency

    -

    Average latency of Access procedure requests. The counter keeps track of the average response time of Access requests.

    +

    node_nfs_lookup_avg_latency

    +

    Average latency of LookUp procedure requests. This shows the average time it takes for the LookUp operation to reply to the request.

    @@ -37607,55 +42385,55 @@

    node_nfs_access_avg_latency
    Unit: microsec
    Type: average
    Base: access.total +

    - + - + - + - + - + - + - +
    lookup.average_latency
    Unit: microsec
    Type: average
    Base: lookup.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodeaccess.average_latency
    Unit: microsec
    Type: average
    Base: access.total
    lookup.average_latency
    Unit: microsec
    Type: average
    Base: lookup.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodeaccess.average_latency
    Unit: microsec
    Type: average
    Base: access.total
    lookup.average_latency
    Unit: microsec
    Type: average
    Base: lookup.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodeaccess.average_latency
    Unit: microsec
    Type: average
    Base: access.total
    lookup.average_latency
    Unit: microsec
    Type: average
    Base: lookup.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv3:nodeaccess_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: access_total
    lookup_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookup_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodeaccess_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: access_total
    lookup_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookup_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodeaccess_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: access_total
    lookup_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookup_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodeaccess_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: access_total
    lookup_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookup_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_access_total

    -

    Total number of Access procedure requests. It is the total number of access success and access error requests.

    +

    node_nfs_lookup_total

    +

    Total number of Lookup procedure requests. It is the total number of lookup success and lookup error requests.

    @@ -37669,55 +42447,55 @@

    node_nfs_access_total
    Unit: none
    Type: rate
    Base: +

    - + - + - + - + - + - + - +
    lookup.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodeaccess.total
    Unit: none
    Type: rate
    Base:
    lookup.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodeaccess.total
    Unit: none
    Type: rate
    Base:
    lookup.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodeaccess.total
    Unit: none
    Type: rate
    Base:
    lookup.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv3:nodeaccess_total
    Unit: none
    Type: rate
    Base:
    lookup_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodeaccess_total
    Unit: none
    Type: rate
    Base:
    lookup_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodeaccess_total
    Unit: none
    Type: rate
    Base:
    lookup_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodeaccess_total
    Unit: none
    Type: rate
    Base:
    lookup_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_backchannel_ctl_avg_latency

    -

    Average latency of BACKCHANNEL_CTL operations.

    +

    node_nfs_lookupp_avg_latency

    +

    Average latency of LOOKUPP operations.

    @@ -37731,31 +42509,43 @@

    node_nfs_backchannel_ctl_avg_laten

    - + - + + + + + + + - + - + + + + + + +
    REST api/cluster/counter/tables/svm_nfs_v41:nodebackchannel_ctl.average_latency
    Unit: microsec
    Type: average
    Base: backchannel_ctl.total
    lookupp.average_latency
    Unit: microsec
    Type: average
    Base: lookupp.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodebackchannel_ctl.average_latency
    Unit: microsec
    Type: average
    Base: backchannel_ctl.total
    lookupp.average_latency
    Unit: microsec
    Type: average
    Base: lookupp.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodelookupp.average_latency
    Unit: microsec
    Type: average
    Base: lookupp.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodebackchannel_ctl_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: backchannel_ctl_total
    lookupp_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookupp_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodebackchannel_ctl_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: backchannel_ctl_total
    lookupp_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookupp_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodelookupp_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookupp_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_backchannel_ctl_total

    -

    Total number of BACKCHANNEL_CTL operations.

    +

    node_nfs_lookupp_total

    +

    Total number of LOOKUPP operations.

    @@ -37769,31 +42559,43 @@

    node_nfs_backchannel_ctl_total
    Unit: none
    Type: rate
    Base: +

    - + + + + + + + - + - + + + + + + +
    lookupp.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodebackchannel_ctl.total
    Unit: none
    Type: rate
    Base:
    lookupp.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodelookupp.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodebackchannel_ctl_total
    Unit: none
    Type: rate
    Base:
    lookupp_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodebackchannel_ctl_total
    Unit: none
    Type: rate
    Base:
    lookupp_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodelookupp_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_bind_conn_to_session_avg_latency

    -

    Average latency of BIND_CONN_TO_SESSION operations.

    +

    node_nfs_mkdir_avg_latency

    +

    Average latency of MkDir procedure requests. The counter keeps track of the average response time of MkDir requests.

    @@ -37806,32 +42608,98 @@

    node_nfs_bind_conn_to_session

    - - - + + + + + + + + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodebind_connections_to_session.average_latency
    Unit: microsec
    Type: average
    Base: bind_connections_to_session.total
    conf/restperf/9.12.0/nfsv4_1_node.yamlapi/cluster/counter/tables/svm_nfs_v3:nodemkdir.average_latency
    Unit: microsec
    Type: average
    Base: mkdir.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodemkdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: mkdir_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    +

    node_nfs_mkdir_total

    +

    Total number of MkDir procedure requests. It is the total number of MkDir success and MkDir error requests.

    + + + + + + + + + - - - + + + - - - + + + + + +
    API Endpoint Metric Template
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodebind_conn_to_session.average_latency
    Unit: microsec
    Type: average
    Base: bind_conn_to_session.total
    conf/restperf/9.12.0/nfsv4_2_node.yamlapi/cluster/counter/tables/svm_nfs_v3:nodemkdir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodebind_conn_to_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: bind_conn_to_session_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlperf-object-get-instances nfsv3:nodemkdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    +

    node_nfs_mknod_avg_latency

    +

    Average latency of MkNod procedure requests. The counter keeps track of the average response time of MkNod requests.

    + + + + + + + + + + + + + + + - - - + + +
    API Endpoint Metric Template
    RESTapi/cluster/counter/tables/svm_nfs_v3:nodemknod.average_latency
    Unit: microsec
    Type: average
    Base: mknod.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodebind_conn_to_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: bind_conn_to_session_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances nfsv3:nodemknod_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: mknod_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_nfs_bind_conn_to_session_total

    -

    Total number of BIND_CONN_TO_SESSION operations.

    +

    node_nfs_mknod_total

    +

    Total number of MkNod procedure requests. It is the total number of MkNod success and MkNod error requests.

    + + + + + + + + + + + + + + + + + + + + + + + +
    API Endpoint Metric Template
    RESTapi/cluster/counter/tables/svm_nfs_v3:nodemknod.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodemknod_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    +

    node_nfs_null_avg_latency

    +

    Average latency of Null procedure requests.

    @@ -37844,32 +42712,56 @@

    node_nfs_bind_conn_to_session_total

    + + + + + + - + - + + + + + + + + + + + + + - + - + + + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v3:nodenull.average_latency
    Unit: microsec
    Type: average
    Base: null.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodebind_connections_to_session.total
    Unit: none
    Type: rate
    Base:
    null.average_latency
    Unit: microsec
    Type: average
    Base: null.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodebind_conn_to_session.total
    Unit: none
    Type: rate
    Base:
    null.average_latency
    Unit: microsec
    Type: average
    Base: null.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodenull.average_latency
    Unit: microsec
    Type: average
    Base: null.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodenull_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodebind_conn_to_session_total
    Unit: none
    Type: rate
    Base:
    null_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodebind_conn_to_session_total
    Unit: none
    Type: rate
    Base:
    null_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodenull_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_close_avg_latency

    -

    Average latency of CLOSE operations.

    +

    node_nfs_null_total

    +

    Total number of Null procedure requests. It is the total number of Null success and Null error requests.

    @@ -37882,44 +42774,56 @@

    node_nfs_close_avg_latency
    Unit: none
    Type: rate
    Base: +

    + + + - + - + - + + + + + + + - + - + - +
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodeclose.average_latency
    Unit: microsec
    Type: average
    Base: close.total
    null.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodeclose.average_latency
    Unit: microsec
    Type: average
    Base: close.total
    null.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodeclose.average_latency
    Unit: microsec
    Type: average
    Base: close.total
    null.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodenull_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodeclose_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: close_total
    null_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodeclose_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: close_total
    null_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodeclose_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: close_total
    null_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_close_total

    -

    Total number of CLOSE operations.

    +

    node_nfs_nverify_avg_latency

    +

    Average latency of NVERIFY operations.

    @@ -37933,43 +42837,43 @@

    node_nfs_close_total
    Unit: none
    Type: rate
    Base: +

    - + - + - + - + - +
    nverify.average_latency
    Unit: microsec
    Type: average
    Base: nverify.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodeclose.total
    Unit: none
    Type: rate
    Base:
    nverify.average_latency
    Unit: microsec
    Type: average
    Base: nverify.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodeclose.total
    Unit: none
    Type: rate
    Base:
    nverify.average_latency
    Unit: microsec
    Type: average
    Base: nverify.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodeclose_total
    Unit: none
    Type: rate
    Base:
    nverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: nverify_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodeclose_total
    Unit: none
    Type: rate
    Base:
    nverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: nverify_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodeclose_total
    Unit: none
    Type: rate
    Base:
    nverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: nverify_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_commit_avg_latency

    -

    Average latency of Commit procedure requests. The counter keeps track of the average response time of Commit requests.

    +

    node_nfs_nverify_total

    +

    Total number of NVERIFY operations.

    @@ -37982,56 +42886,44 @@

    node_nfs_commit_avg_latency
    Unit: microsec
    Type: average
    Base: commit.total -

    - - - - + - + - + - - - - - - - + - + - +
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodecommit.average_latency
    Unit: microsec
    Type: average
    Base: commit.total
    nverify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodecommit.average_latency
    Unit: microsec
    Type: average
    Base: commit.total
    nverify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodecommit.average_latency
    Unit: microsec
    Type: average
    Base: commit.total
    nverify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodecommit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: commit_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodecommit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: commit_total
    nverify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodecommit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: commit_total
    nverify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodecommit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: commit_total
    nverify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
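
The Type: rate rows (Unit: none) are simpler: the published value is the change in the cumulative total divided by the elapsed time between polls. A small sketch of that arithmetic, again with placeholder numbers:

    import time

    # Two polls of a Type: rate counter such as nverify.total (a cumulative count).
    prev_total, prev_ts = 5_000, time.time() - 60.0   # pretend the last poll was ~60 s ago
    curr_total, curr_ts = 5_900, time.time()

    elapsed = curr_ts - prev_ts
    ops_per_sec = (curr_total - prev_total) / elapsed if elapsed > 0 else 0.0
    print(f"nverify ops/s: {ops_per_sec:.2f}")
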
    -

    node_nfs_commit_total

    -

    Total number of Commit procedure requests. It is the total number of Commit success and Commit error requests.

    +

    node_nfs_open_avg_latency

    +

    Average latency of OPEN operations.

    @@ -38044,56 +42936,44 @@

node_nfs_commit_total
Unit: none
    Type: rate
    Base: -

    - - - - + - + - + - - - - - - - + - + - +
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodecommit.total
    Unit: none
    Type: rate
    Base:
    open.average_latency
    Unit: microsec
    Type: average
    Base: open.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodecommit.total
    Unit: none
    Type: rate
    Base:
    open.average_latency
    Unit: microsec
    Type: average
    Base: open.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodecommit.total
    Unit: none
    Type: rate
    Base:
    open.average_latency
    Unit: microsec
    Type: average
    Base: open.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodecommit_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodecommit_total
    Unit: none
    Type: rate
    Base:
    open_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodecommit_total
    Unit: none
    Type: rate
    Base:
    open_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodecommit_total
    Unit: none
    Type: rate
    Base:
    open_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_create_avg_latency

    -

    Average latency of Create procedure requests. The counter keeps track of the average response time of Create requests.

    +

    node_nfs_open_confirm_avg_latency

    +

    Average latency of OPEN_CONFIRM procedures

    @@ -38106,56 +42986,46 @@

node_nfs_create_avg_latency
Unit: microsec
    Type: average
    Base: create.total -

    - - - - - - - - - - - - - - - - + - - - + + + + +
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodecreate.average_latency
    Unit: microsec
    Type: average
    Base: create.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodecreate.average_latency
    Unit: microsec
    Type: average
    Base: create.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodecreate.average_latency
    Unit: microsec
    Type: average
    Base: create.total
    open_confirm.average_latency
    Unit: microsec
    Type: average
    Base: open_confirm.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodecreate_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances nfsv4:nodeopen_confirm_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_confirm_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    +

    node_nfs_open_confirm_total

    +

    Total number of OPEN_CONFIRM procedures

    + + - - - - + + + + + + - - - - + + + + - +
    ZAPIperf-object-get-instances nfsv4_1:nodecreate_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_total
conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
API Endpoint Metric Template
    ZAPIperf-object-get-instances nfsv4_2:nodecreate_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlRESTapi/cluster/counter/tables/svm_nfs_v4:nodeopen_confirm.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodecreate_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_total
    open_confirm_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_create_session_avg_latency

    -

    Average latency of CREATE_SESSION operations.

    +

    node_nfs_open_downgrade_avg_latency

    +

    Average latency of OPEN_DOWNGRADE operations.

    @@ -38169,31 +43039,43 @@

    node_nfs_create_session_avg_latency

    - + - + + + + + + + - + - + + + + + + +
    REST api/cluster/counter/tables/svm_nfs_v41:nodecreate_session.average_latency
    Unit: microsec
    Type: average
    Base: create_session.total
    open_downgrade.average_latency
    Unit: microsec
    Type: average
    Base: open_downgrade.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodecreate_session.average_latency
    Unit: microsec
    Type: average
    Base: create_session.total
    open_downgrade.average_latency
    Unit: microsec
    Type: average
    Base: open_downgrade.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeopen_downgrade.average_latency
    Unit: microsec
    Type: average
    Base: open_downgrade.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodecreate_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_session_total
    open_downgrade_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_downgrade_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodecreate_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_session_total
    open_downgrade_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_downgrade_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeopen_downgrade_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_downgrade_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_create_session_total

    -

    Total number of CREATE_SESSION operations.

    +

    node_nfs_open_downgrade_total

    +

    Total number of OPEN_DOWNGRADE operations.

    @@ -38207,31 +43089,43 @@

node_nfs_create_session_total
Unit: none
    Type: rate
    Base: +

    - + + + + + + + - + - + + + + + + +
    open_downgrade.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodecreate_session.total
    Unit: none
    Type: rate
    Base:
    open_downgrade.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeopen_downgrade.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodecreate_session_total
    Unit: none
    Type: rate
    Base:
    open_downgrade_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodecreate_session_total
    Unit: none
    Type: rate
    Base:
    open_downgrade_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeopen_downgrade_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_create_total

    -

Total number of Create procedure requests. It is the total number of Create success and Create error requests.

    +

    node_nfs_open_total

    +

    Total number of OPEN operations.

    @@ -38244,56 +43138,44 @@

node_nfs_create_total
Unit: none
    Type: rate
    Base: -

    - - - - + - + - + - - - - - - - + - + - +
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodecreate.total
    Unit: none
    Type: rate
    Base:
    open.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodecreate.total
    Unit: none
    Type: rate
    Base:
    open.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodecreate.total
    Unit: none
    Type: rate
    Base:
    open.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodecreate_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodecreate_total
    Unit: none
    Type: rate
    Base:
    open_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodecreate_total
    Unit: none
    Type: rate
    Base:
    open_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodecreate_total
    Unit: none
    Type: rate
    Base:
    open_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
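
The REST rows above all reference counter tables such as api/cluster/counter/tables/svm_nfs_v4:node. For ad-hoc checks you can read those counters directly; the sketch below is a rough illustration only — the /rows sub-resource, the counters.name filter, and the host and credential values are assumptions about the ONTAP REST API and your environment, not something defined on this page:

    import requests

    CLUSTER = "https://cluster.example.com"   # hypothetical cluster address
    AUTH = ("admin", "password")              # hypothetical credentials
    TABLE = "svm_nfs_v4:node"                 # counter table named in the rows above

    # Assumed ONTAP REST pattern: counter tables expose their samples under /rows.
    url = f"{CLUSTER}/api/cluster/counter/tables/{TABLE}/rows"
    resp = requests.get(url,
                        params={"fields": "counters", "counters.name": "open.total"},
                        auth=AUTH, verify=False)
    resp.raise_for_status()

    for row in resp.json().get("records", []):
        for counter in row.get("counters", []):
            print(row.get("id"), counter.get("name"), counter.get("value"))
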
    -

    node_nfs_delegpurge_avg_latency

    -

    Average latency of DELEGPURGE operations.

    +

    node_nfs_openattr_avg_latency

    +

    Average latency of OPENATTR operations.

    @@ -38307,43 +43189,43 @@

    node_nfs_delegpurge_avg_latency

    - + - + - + - + - + - +
    REST api/cluster/counter/tables/svm_nfs_v41:nodedelegpurge.average_latency
    Unit: microsec
    Type: average
    Base: delegpurge.total
    openattr.average_latency
    Unit: microsec
    Type: average
    Base: openattr.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodedelegpurge.average_latency
    Unit: microsec
    Type: average
    Base: delegpurge.total
    openattr.average_latency
    Unit: microsec
    Type: average
    Base: openattr.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodedelegpurge.average_latency
    Unit: microsec
    Type: average
    Base: delegpurge.total
    openattr.average_latency
    Unit: microsec
    Type: average
    Base: openattr.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodedelegpurge_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegpurge_total
    openattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: openattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodedelegpurge_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegpurge_total
    openattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: openattr_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodedelegpurge_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegpurge_total
    openattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: openattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_delegpurge_total

    -

    Total number of DELEGPURGE operations.

    +

    node_nfs_openattr_total

    +

    Total number of OPENATTR operations.

    @@ -38357,43 +43239,43 @@

node_nfs_delegpurge_total
Unit: none
    Type: rate
    Base: +

    - + - + - + - + - +
    openattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodedelegpurge.total
    Unit: none
    Type: rate
    Base:
    openattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodedelegpurge.total
    Unit: none
    Type: rate
    Base:
    openattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodedelegpurge_total
    Unit: none
    Type: rate
    Base:
    openattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodedelegpurge_total
    Unit: none
    Type: rate
    Base:
    openattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodedelegpurge_total
    Unit: none
    Type: rate
    Base:
    openattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_delegreturn_avg_latency

    -

    Average latency of DELEGRETURN operations.

    +

    node_nfs_ops

    +

    Number of NFS operations per second

    @@ -38406,44 +43288,72 @@

    node_nfs_delegreturn_avg_latency

    - - - + + + - - - - + + + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodedelegreturn.average_latency
    Unit: microsec
    Type: average
    Base: delegreturn.total
    conf/restperf/9.12.0/nfsv4_1_node.yamlapi/cluster/counter/tables/system:nodenfs_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodedelegreturn.average_latency
    Unit: microsec
    Type: average
    Base: delegreturn.total
    conf/restperf/9.12.0/nfsv4_2_node.yamlZAPIperf-object-get-instances system:nodenfs_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
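
Note the unit change here: unlike the Unit: none totals elsewhere on this page, node_nfs_ops is collected with Unit: per_sec, so the sample is already a rate and needs no delta math. A tiny illustrative contrast (values are made up):

    # A per_sec counter (node_nfs_ops): the collected sample is already ops per second.
    nfs_ops_per_sec = 1450.0
    print(f"node_nfs_ops: {nfs_ops_per_sec:.0f} op/s")

    # A Unit: none, Type: rate counter still needs (delta count) / (delta seconds).
    prev_count, curr_count, interval_s = 90_000, 96_000, 60.0
    print(f"derived rate: {(curr_count - prev_count) / interval_s:.1f} op/s")
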
    +

    node_nfs_pathconf_avg_latency

    +

    Average latency of PathConf procedure requests. The counter keeps track of the average response time of PathConf requests.

    + + + + + + + + + - - - + + + - - - + + + + +
API Endpoint Metric Template
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodedelegreturn.average_latency
    Unit: microsec
    Type: average
    Base: delegreturn.total
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/svm_nfs_v3:nodepathconf.average_latency
    Unit: microsec
    Type: average
    Base: pathconf.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodedelegreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegreturn_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlperf-object-get-instances nfsv3:nodepathconf_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: pathconf_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    +

    node_nfs_pathconf_total

    +

Total number of PathConf procedure requests. It is the total number of PathConf success and PathConf error requests.

    + + - - - - + + + + + + + + + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_2:nodedelegreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegreturn_total
conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
API Endpoint Metric Template
    RESTapi/cluster/counter/tables/svm_nfs_v3:nodepathconf.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodedelegreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegreturn_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nfsv3:nodepathconf_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_nfs_delegreturn_total

    -

    Total number of DELEGRETURN operations.

    +

    node_nfs_putfh_avg_latency

    +

Average latency of PUTFH operations.

    @@ -38457,43 +43367,43 @@

node_nfs_delegreturn_total
Unit: none
    Type: rate
    Base: +

    - + - + - + - + - +
    putfh.average_latency
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodedelegreturn.total
    Unit: none
    Type: rate
    Base:
    putfh.average_latency
    Unit: microsec
    Type: average
    Base: putfh.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodedelegreturn.total
    Unit: none
    Type: rate
    Base:
    putfh.average_latency
    Unit: microsec
    Type: average
    Base: putfh.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodedelegreturn_total
    Unit: none
    Type: rate
    Base:
    putfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodedelegreturn_total
    Unit: none
    Type: rate
    Base:
    putfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putfh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodedelegreturn_total
    Unit: none
    Type: rate
    Base:
    putfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_destroy_clientid_avg_latency

    -

    Average latency of DESTROY_CLIENTID operations.

    +

    node_nfs_putfh_total

    +

    Total number of PUTFH operations.

    @@ -38507,31 +43417,43 @@

node_nfs_destroy_clientid_avg_latency

    - + - + + + + + + + - + - + + + + + + +
    REST api/cluster/counter/tables/svm_nfs_v41:nodedestroy_clientid.average_latency
    Unit: microsec
    Type: average
    Base: destroy_clientid.total
    putfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodedestroy_clientid.average_latency
    Unit: microsec
    Type: average
    Base: destroy_clientid.total
    putfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeputfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodedestroy_clientid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: destroy_clientid_total
    putfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodedestroy_clientid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: destroy_clientid_total
    putfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeputfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
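
The PUTFH rows also show the naming pattern that holds throughout this page: the REST counter uses dotted names (putfh.average_latency, putfh.total) while the ZAPI counter uses underscores (putfh_avg_latency, putfh_total). If you maintain tooling against both collectors, a small lookup table keeps the two in sync; the dict below is only an illustration built from the rows above:

    # Harvest metric -> (REST counter, ZAPI counter), copied from the rows above.
    NAME_MAP = {
        "node_nfs_putfh_avg_latency": ("putfh.average_latency", "putfh_avg_latency"),
        "node_nfs_putfh_total":       ("putfh.total",           "putfh_total"),
    }

    for metric, (rest_name, zapi_name) in NAME_MAP.items():
        print(f"{metric}: REST={rest_name}  ZAPI={zapi_name}")
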
    -

    node_nfs_destroy_clientid_total

    -

    Total number of DESTROY_CLIENTID operations.

    +

    node_nfs_putpubfh_avg_latency

    +

    Average latency of PUTPUBFH operations.

    @@ -38545,31 +43467,43 @@

    node_nfs_destroy_clientid_total

    - + - + + + + + + + - + - + + + + + + +
    REST api/cluster/counter/tables/svm_nfs_v41:nodedestroy_clientid.total
    Unit: none
    Type: rate
    Base:
    putpubfh.average_latency
    Unit: microsec
    Type: average
    Base: putpubfh.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodedestroy_clientid.total
    Unit: none
    Type: rate
    Base:
    putpubfh.average_latency
    Unit: microsec
    Type: average
    Base: putpubfh.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeputpubfh.average_latency
    Unit: microsec
    Type: average
    Base: putpubfh.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodedestroy_clientid_total
    Unit: none
    Type: rate
    Base:
    putpubfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putpubfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodedestroy_clientid_total
    Unit: none
    Type: rate
    Base:
    putpubfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putpubfh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeputpubfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putpubfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_destroy_session_avg_latency

    -

    Average latency of DESTROY_SESSION operations.

    +

    node_nfs_putpubfh_total

    +

    Total number of PUTPUBFH operations.

    @@ -38583,31 +43517,43 @@

node_nfs_destroy_session_avg_latency

    - + - + + + + + + + - + - + + + + + + +
    REST api/cluster/counter/tables/svm_nfs_v41:nodedestroy_session.average_latency
    Unit: microsec
    Type: average
    Base: destroy_session.total
    putpubfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodedestroy_session.average_latency
    Unit: microsec
    Type: average
    Base: destroy_session.total
    putpubfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeputpubfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodedestroy_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: destroy_session_total
    putpubfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodedestroy_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: destroy_session_total
    putpubfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeputpubfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_destroy_session_total

    -

    Total number of DESTROY_SESSION operations.

    +

    node_nfs_putrootfh_avg_latency

    +

    Average latency of PUTROOTFH operations.

    @@ -38621,31 +43567,43 @@

node_nfs_destroy_session_total
Unit: none
    Type: rate
    Base: +

    - + + + + + + + - + - + + + + + + +
    putrootfh.average_latency
    Unit: microsec
    Type: average
    Base: putrootfh.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodedestroy_session.total
    Unit: none
    Type: rate
    Base:
    putrootfh.average_latency
    Unit: microsec
    Type: average
    Base: putrootfh.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeputrootfh.average_latency
    Unit: microsec
    Type: average
    Base: putrootfh.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodedestroy_session_total
    Unit: none
    Type: rate
    Base:
    putrootfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putrootfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodedestroy_session_total
    Unit: none
    Type: rate
    Base:
    putrootfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putrootfh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeputrootfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putrootfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_exchange_id_avg_latency

    -

    Average latency of EXCHANGE_ID operations.

    +

    node_nfs_putrootfh_total

    +

    Total number of PUTROOTFH operations.

    @@ -38659,31 +43617,43 @@

    node_nfs_exchange_id_avg_latency

    - + - + + + + + + + - + - + + + + + + +
    REST api/cluster/counter/tables/svm_nfs_v41:nodeexchange_id.average_latency
    Unit: microsec
    Type: average
    Base: exchange_id.total
    putrootfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodeexchange_id.average_latency
    Unit: microsec
    Type: average
    Base: exchange_id.total
    putrootfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeputrootfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodeexchange_id_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: exchange_id_total
    putrootfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodeexchange_id_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: exchange_id_total
    putrootfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeputrootfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_exchange_id_total

    -

    Total number of EXCHANGE_ID operations.

    +

    node_nfs_read_avg_latency

    +

    Average latency of Read procedure requests. The counter keeps track of the average response time of Read requests.

    @@ -38696,32 +43666,56 @@

node_nfs_exchange_id_total
Unit: microsec
    Type: average
    Base: read.total +

    + + + - + - + + + + + + + + + + + + + - + - + + + + + + +
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodeexchange_id.total
    Unit: none
    Type: rate
    Base:
    read.average_latency
    Unit: microsec
    Type: average
    Base: read.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodeexchange_id.total
    Unit: none
    Type: rate
    Base:
    read.average_latency
    Unit: microsec
    Type: average
    Base: read.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderead.average_latency
    Unit: microsec
    Type: average
    Base: read.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:noderead_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodeexchange_id_total
    Unit: none
    Type: rate
    Base:
    read_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodeexchange_id_total
    Unit: none
    Type: rate
    Base:
    read_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:noderead_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_free_stateid_avg_latency

    -

    Average latency of FREE_STATEID operations.

    +

    node_nfs_read_ops

    +

    Total observed NFSv3 read operations per second.

    @@ -38734,32 +43728,20 @@

    node_nfs_free_stateid_avg_latency

    - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodefree_stateid.average_latency
    Unit: microsec
    Type: average
    Base: free_stateid.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodefree_stateid.average_latency
    Unit: microsec
    Type: average
    Base: free_stateid.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodefree_stateid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: free_stateid_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/svm_nfs_v3:noderead_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodefree_stateid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: free_stateid_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances nfsv3:nodenfsv3_read_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
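
The ZAPI row here carries the extra no-zero-values qualifier (Type: rate,no-zero-values). The usual reading — and it is only my reading, the page itself does not spell it out — is that samples whose value is zero are dropped rather than exported. A sketch of that filtering step:

    # Assumed semantics of the ",no-zero-values" qualifier on ZAPI counters:
    # samples that compute to zero are suppressed instead of being exported.
    samples = {"node_nfs_read_ops": 0.0, "node_nfs_ops": 1450.0}   # placeholder values

    exported = {name: value for name, value in samples.items() if value != 0.0}
    print(exported)   # only the non-zero sample survives
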
    -

    node_nfs_free_stateid_total

    -

    Total number of FREE_STATEID operations.

    + +

    Average latency of ReadSymLink procedure requests. The counter keeps track of the average response time of ReadSymLink requests.

    @@ -38772,32 +43754,20 @@

node_nfs_free_stateid_total
Unit: none
    Type: rate
    Base: -

    - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodefree_stateid.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodefree_stateid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/svm_nfs_v3:noderead_symlink.average_latency
    Unit: microsec
    Type: average
    Base: read_symlink.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodefree_stateid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances nfsv3:noderead_symlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_symlink_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_nfs_fsinfo_avg_latency

    -

    Average latency of FSInfo procedure requests. The counter keeps track of the average response time of FSInfo requests.

    + +

    Total number of ReadSymLink procedure requests. It is the total number of read symlink success and read symlink error requests.

    @@ -38811,19 +43781,19 @@

node_nfs_fsinfo_avg_latency
Unit: microsec
    Type: average
    Base: fsinfo.total +

    - +
    read_symlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv3:nodefsinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: fsinfo_total
    read_symlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_nfs_fsinfo_total

    -

Total number of FSInfo procedure requests. It is the total number of FSInfo success and FSInfo error requests.

    +

    node_nfs_read_throughput

    +

    Rate of NFSv3 read data transfers per second.

    @@ -38837,19 +43807,55 @@

node_nfs_fsinfo_total
Unit: none
    Type: rate
    Base: +

    + + + + + + + + + + + + + + + + + + - + + + + + + + + + + + + + + + + + + +
    read_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodetotal.read_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodetotal.read_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodetotal.read_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv3:nodefsinfo_total
    Unit: none
    Type: rate
    Base:
    nfsv3_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodenfs41_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodenfs42_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodenfs4_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
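
The read-throughput counters are published with Unit: b_per_sec, which reads as bytes per second; converting to a display unit is plain arithmetic. A short sketch with a placeholder sample:

    # A b_per_sec sample such as total.read_throughput (the value is a placeholder).
    read_throughput_b_per_sec = 268_435_456

    mib_per_sec = read_throughput_b_per_sec / (1024 * 1024)
    print(f"node_nfs_read_throughput: {mib_per_sec:.1f} MiB/s")
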
    -

    node_nfs_fsstat_avg_latency

    -

    Average latency of FSStat procedure requests. The counter keeps track of the average response time of FSStat requests.

    +

    node_nfs_read_total

    +

Total number of Read procedure requests. It is the total number of Read success and Read error requests.

    @@ -38863,19 +43869,55 @@

node_nfs_fsstat_avg_latency
Unit: microsec
    Type: average
    Base: fsstat.total +

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + +
    read.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:noderead.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:noderead.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderead.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:noderead_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:noderead_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:noderead_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodefsstat_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: fsstat_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances nfsv4:noderead_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_fsstat_total

    -

Total number of FSStat procedure requests. It is the total number of FSStat success and FSStat error requests.

    +

    node_nfs_readdir_avg_latency

    +

    Average latency of ReadDir procedure requests. The counter keeps track of the average response time of ReadDir requests.

    @@ -38889,19 +43931,55 @@

node_nfs_fsstat_total
Unit: none
    Type: rate
    Base: +

    + + + + + + + + + + + + + + + + + + - + + + + + + + + + + + + + + + + + + +
    readdir.average_latency
    Unit: microsec
    Type: average
    Base: readdir.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodereaddir.average_latency
    Unit: microsec
    Type: average
    Base: readdir.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodereaddir.average_latency
    Unit: microsec
    Type: average
    Base: readdir.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodereaddir.average_latency
    Unit: microsec
    Type: average
    Base: readdir.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv3:nodefsstat_total
    Unit: none
    Type: rate
    Base:
    readdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodereaddir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodereaddir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodereaddir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_get_dir_delegation_avg_latency

    -

    Average latency of GET_DIR_DELEGATION operations.

    +

    node_nfs_readdir_total

    +

Total number of ReadDir procedure requests. It is the total number of ReadDir success and ReadDir error requests.

    @@ -38914,32 +43992,56 @@

node_nfs_get_dir_delegation_avg_latency

    + + + + + + - + - + + + + + + + + + + + + + - + - + + + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v3:nodereaddir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodeget_dir_delegation.average_latency
    Unit: microsec
    Type: average
    Base: get_dir_delegation.total
    readdir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodeget_dir_delegation.average_latency
    Unit: microsec
    Type: average
    Base: get_dir_delegation.total
    readdir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodereaddir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodereaddir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodeget_dir_delegation_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: get_dir_delegation_total
    readdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodeget_dir_delegation_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: get_dir_delegation_total
    readdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodereaddir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_get_dir_delegation_total

    -

    Total number of GET_DIR_DELEGATION operations.

    +

    node_nfs_readdirplus_avg_latency

    +

    Average latency of ReadDirPlus procedure requests. The counter keeps track of the average response time of ReadDirPlus requests.

    @@ -38952,32 +44054,20 @@

    node_nfs_get_dir_delegation_total

    - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodeget_dir_delegation.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeget_dir_delegation.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeget_dir_delegation_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/svm_nfs_v3:nodereaddirplus.average_latency
    Unit: microsec
    Type: average
    Base: readdirplus.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeget_dir_delegation_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances nfsv3:nodereaddirplus_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdirplus_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_nfs_getattr_avg_latency

    -

    Average latency of GetAttr procedure requests. This counter keeps track of the average response time of GetAttr requests.

    +

    node_nfs_readdirplus_total

    +

Total number of ReadDirPlus procedure requests. It is the total number of ReadDirPlus success and ReadDirPlus error requests.

    @@ -38991,55 +44081,69 @@

node_nfs_getattr_avg_latency
Unit: microsec
    Type: average
    Base: getattr.total +

    + + + + + + +
    readdirplus.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodereaddirplus_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    + +

    Average latency of READLINK operations.

    + + + + + + + + + + + - + - + - + - - - - - - - + - + - +
API Endpoint Metric Template
    REST api/cluster/counter/tables/svm_nfs_v41:nodegetattr.average_latency
    Unit: microsec
    Type: average
    Base: getattr.total
    readlink.average_latency
    Unit: microsec
    Type: average
    Base: readlink.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodegetattr.average_latency
    Unit: microsec
    Type: average
    Base: getattr.total
    readlink.average_latency
    Unit: microsec
    Type: average
    Base: readlink.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodegetattr.average_latency
    Unit: microsec
    Type: average
    Base: getattr.total
    readlink.average_latency
    Unit: microsec
    Type: average
    Base: readlink.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodegetattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getattr_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodegetattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getattr_total
    readlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readlink_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodegetattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getattr_total
    readlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readlink_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodegetattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getattr_total
    readlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readlink_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_getattr_total

    -

    Total number of Getattr procedure requests. It is the total number of getattr success and getattr error requests.

    + +

    Total number of READLINK operations.

    @@ -39052,56 +44156,44 @@

    node_nfs_getattr_total

    - - - - - - - + - + - + - - - - - - - + - + - +
    RESTapi/cluster/counter/tables/svm_nfs_v3:nodegetattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodegetattr.total
    Unit: none
    Type: rate
    Base:
    readlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodegetattr.total
    Unit: none
    Type: rate
    Base:
    readlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodegetattr.total
    Unit: none
    Type: rate
    Base:
    readlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodegetattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodegetattr_total
    Unit: none
    Type: rate
    Base:
    readlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodegetattr_total
    Unit: none
    Type: rate
    Base:
    readlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodegetattr_total
    Unit: none
    Type: rate
    Base:
    readlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
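
Every counter in this section comes from the :node flavor of its table, so there is one series per node; a cluster-wide figure is simply the sum across nodes. A tiny sketch with placeholder per-node values:

    # Per-node samples of a rate metric such as node_nfs_readlink_total (placeholders).
    per_node_ops = {"node-01": 120.0, "node-02": 95.5, "node-03": 0.0}

    cluster_ops = sum(per_node_ops.values())
    print(f"cluster READLINK ops/s: {cluster_ops:.1f}")
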
    -

    node_nfs_getdeviceinfo_avg_latency

    -

    Average latency of GETDEVICEINFO operations.

    +

    node_nfs_reclaim_complete_avg_latency

    +

    Average latency of RECLAIM_COMPLETE operations.

    @@ -39115,31 +44207,31 @@

    node_nfs_getdeviceinfo_avg_latency

    - + - + - + - +
    REST api/cluster/counter/tables/svm_nfs_v41:nodegetdeviceinfo.average_latency
    Unit: microsec
    Type: average
    Base: getdeviceinfo.total
    reclaim_complete.average_latency
    Unit: microsec
    Type: average
    Base: reclaim_complete.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodegetdeviceinfo.average_latency
    Unit: microsec
    Type: average
    Base: getdeviceinfo.total
    reclaim_complete.average_latency
    Unit: microsec
    Type: average
    Base: reclaim_complete.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodegetdeviceinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getdeviceinfo_total
    reclaim_complete_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: reclaim_complete_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodegetdeviceinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getdeviceinfo_total
    reclaim_complete_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: reclaim_complete_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_nfs_getdeviceinfo_total

    -

    Total number of GETDEVICEINFO operations.

    +

    node_nfs_reclaim_complete_total

    +

    Total number of RECLAIM_COMPLETE operations.

    @@ -39153,31 +44245,31 @@

node_nfs_getdeviceinfo_total
Unit: none
    Type: rate
    Base: +

    - + - + - +
    reclaim_complete.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodegetdeviceinfo.total
    Unit: none
    Type: rate
    Base:
    reclaim_complete.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodegetdeviceinfo_total
    Unit: none
    Type: rate
    Base:
    reclaim_complete_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodegetdeviceinfo_total
    Unit: none
    Type: rate
    Base:
    reclaim_complete_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_nfs_getdevicelist_avg_latency

    -

    Average latency of GETDEVICELIST operations.

    +

    node_nfs_release_lock_owner_avg_latency

    +

Average latency of RELEASE_LOCKOWNER procedures

    @@ -39190,32 +44282,46 @@

    node_nfs_getdevicelist_avg_latency

    - - - + + + - - - - + + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodegetdevicelist.average_latency
    Unit: microsec
    Type: average
    Base: getdevicelist.total
    conf/restperf/9.12.0/nfsv4_1_node.yamlapi/cluster/counter/tables/svm_nfs_v4:noderelease_lock_owner.average_latency
    Unit: microsec
    Type: average
    Base: release_lock_owner.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodegetdevicelist.average_latency
    Unit: microsec
    Type: average
    Base: getdevicelist.total
    conf/restperf/9.12.0/nfsv4_2_node.yamlZAPIperf-object-get-instances nfsv4:noderelease_lock_owner_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: release_lock_owner_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    +

    node_nfs_release_lock_owner_total

    +

    Total number of RELEASE_LOCKOWNER procedures

    + + - - - - + + + + + + + + + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1:nodegetdevicelist_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getdevicelist_total
conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
API Endpoint Metric Template
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderelease_lock_owner.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodegetdevicelist_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getdevicelist_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances nfsv4:noderelease_lock_owner_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_getdevicelist_total

    -

    Total number of GETDEVICELIST operations.

    +

    node_nfs_remove_avg_latency

    +

    Average latency of Remove procedure requests. The counter keeps track of the average response time of Remove requests.

    @@ -39228,32 +44334,56 @@

node_nfs_getdevicelist_total
Unit: microsec
    Type: average
    Base: remove.total +

    + + + - + - + + + + + + + + + + + + + - + - + + + + + + +
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodegetdevicelist.total
    Unit: none
    Type: rate
    Base:
    remove.average_latency
    Unit: microsec
    Type: average
    Base: remove.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodegetdevicelist.total
    Unit: none
    Type: rate
    Base:
    remove.average_latency
    Unit: microsec
    Type: average
    Base: remove.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderemove.average_latency
    Unit: microsec
    Type: average
    Base: remove.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:noderemove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodegetdevicelist_total
    Unit: none
    Type: rate
    Base:
    remove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodegetdevicelist_total
    Unit: none
    Type: rate
    Base:
    remove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:noderemove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_getfh_avg_latency

    -

    Average latency of GETFH operations.

    +

    node_nfs_remove_total

    +

Total number of Remove procedure requests. It is the total number of Remove success and Remove error requests.

    @@ -39266,44 +44396,56 @@

node_nfs_getfh_avg_latency
Unit: none
    Type: rate
    Base: +

    + + + - + - + - + + + + + + + - + - + - +
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodegetfh.average_latency
    Unit: microsec
    Type: average
    Base: getfh.total
    remove.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodegetfh.average_latency
    Unit: microsec
    Type: average
    Base: getfh.total
    remove.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodegetfh.average_latency
    Unit: microsec
    Type: average
    Base: getfh.total
    remove.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:noderemove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodegetfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getfh_total
    remove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodegetfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getfh_total
    remove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodegetfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getfh_total
    remove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_getfh_total

    -

    Total number of GETFH operations.

    +

    node_nfs_rename_avg_latency

    +

    Average latency of Rename procedure requests. The counter keeps track of the average response time of Rename requests.

    @@ -39316,44 +44458,56 @@

node_nfs_getfh_total
Unit: microsec
    Type: average
    Base: rename.total +

    + + + - + - + - + + + + + + + - + - + - +
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodegetfh.total
    Unit: none
    Type: rate
    Base:
    rename.average_latency
    Unit: microsec
    Type: average
    Base: rename.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodegetfh.total
    Unit: none
    Type: rate
    Base:
    rename.average_latency
    Unit: microsec
    Type: average
    Base: rename.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodegetfh.total
    Unit: none
    Type: rate
    Base:
    rename.average_latency
    Unit: microsec
    Type: average
    Base: rename.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:noderename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodegetfh_total
    Unit: none
    Type: rate
    Base:
    rename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodegetfh_total
    Unit: none
    Type: rate
    Base:
    rename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodegetfh_total
    Unit: none
    Type: rate
    Base:
    rename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_latency

    -

    Average latency of NFSv3 requests. This counter keeps track of the average response time of NFSv3 requests.

    +

    node_nfs_rename_total

    +

Total number of Rename procedure requests. It is the total number of Rename success and Rename error requests.

    @@ -39367,55 +44521,55 @@

node_nfs_latency
Unit: microsec
    Type: average
    Base: total_ops +

    - + - + - + - + - + - + - +
    rename.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodelatency
    Unit: microsec
    Type: average
    Base: total_ops
    rename.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelatency
    Unit: microsec
    Type: average
    Base: total_ops
    rename.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodelatency
    Unit: microsec
    Type: average
    Base: total_ops
    rename.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv3:nodelatency
    Unit: microsec
    Type: average,no-zero-values
    Base: total_ops
    rename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelatency
    Unit: microsec
    Type: average,no-zero-values
    Base: total_ops
    rename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelatency
    Unit: microsec
    Type: average,no-zero-values
    Base: total_ops
    rename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodelatency
    Unit: microsec
    Type: average,no-zero-values
    Base: total_ops
    rename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
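rename.total itself is a "rate"-type counter with no base: a monotonically increasing total that becomes operations per second when the delta is divided by the polling interval. A small sketch, using made-up numbers:

```python
# Sketch: turning a "rate"-type counter (a raw running total) into ops/sec.
# Sample numbers are illustrative only.

def per_second(prev_total, curr_total, interval_seconds):
    delta = curr_total - prev_total
    if delta < 0:            # counter reset (e.g. node reboot); skip this interval
        return None
    return delta / interval_seconds

print(per_second(prev_total=4_000, curr_total=4_200, interval_seconds=60))  # ~3.33 ops/sec
```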
    -

    node_nfs_layoutcommit_avg_latency

    -

    Average latency of LAYOUTCOMMIT operations.

    +

    node_nfs_renew_avg_latency

    +

    Average latency of RENEW procedures

    @@ -39428,32 +44582,46 @@

    node_nfs_layoutcommit_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v41:nodelayoutcommit.average_latency
    Unit: microsec
    Type: average
    Base: layoutcommit.total
    conf/restperf/9.12.0/nfsv4_1_node.yamlapi/cluster/counter/tables/svm_nfs_v4:noderenew.average_latency
    Unit: microsec
    Type: average
    Base: renew.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodelayoutcommit.average_latency
    Unit: microsec
    Type: average
    Base: layoutcommit.total
    conf/restperf/9.12.0/nfsv4_2_node.yamlZAPIperf-object-get-instances nfsv4:noderenew_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: renew_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    +

    node_nfs_renew_total

    +

    Total number of RENEW procedures

    ZAPIperf-object-get-instances nfsv4_1:nodelayoutcommit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutcommit_total
conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
API | Endpoint | Metric | Template
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderenew.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodelayoutcommit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutcommit_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances nfsv4:noderenew_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
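The REST rows in these tables point at ONTAP's performance counter tables (for example api/cluster/counter/tables/svm_nfs_v4:node). The sketch below shows one way such a table could be read directly; the /rows path, the fields parameter, and the response layout are assumptions that may vary by ONTAP release:

```python
# Sketch (assumptions): fetch raw NFSv4 node counters from the ONTAP REST API.
# The endpoint path follows the tables listed above; 'fields=*' and the JSON
# layout are assumptions and may need adjusting for your ONTAP release.
import requests

CLUSTER = "https://cluster.example.com"   # hypothetical management LIF
TABLE = "svm_nfs_v4:node"

resp = requests.get(
    f"{CLUSTER}/api/cluster/counter/tables/{TABLE}/rows",
    params={"fields": "*"},
    auth=("admin", "password"),           # placeholder credentials
    verify=False,                         # lab-only; use proper CA certificates in production
)
resp.raise_for_status()
for row in resp.json().get("records", []):
    print(row.get("id"), row.get("counters"))
```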
    -

    node_nfs_layoutcommit_total

    -

    Total number of LAYOUTCOMMIT operations.

    +

    node_nfs_restorefh_avg_latency

    +

    Average latency of RESTOREFH operations.

    @@ -39467,31 +44635,43 @@

    node_nfs_layoutcommit_totalUnit: none
    Type: rate
    Base: +

    restorefh.average_latency
    Unit: microsec
    Type: average
    Base: restorefh.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelayoutcommit.total
    Unit: none
    Type: rate
    Base:
    restorefh.average_latency
    Unit: microsec
    Type: average
    Base: restorefh.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderestorefh.average_latency
    Unit: microsec
    Type: average
    Base: restorefh.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelayoutcommit_total
    Unit: none
    Type: rate
    Base:
    restorefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: restorefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelayoutcommit_total
    Unit: none
    Type: rate
    Base:
    restorefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: restorefh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:noderestorefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: restorefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_layoutget_avg_latency

    -

    Average latency of LAYOUTGET operations.

    +

    node_nfs_restorefh_total

    +

    Total number of RESTOREFH operations.

    @@ -39505,31 +44685,43 @@

    node_nfs_layoutget_avg_latencyUnit: microsec
    Type: average
    Base: layoutget.total +

    restorefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelayoutget.average_latency
    Unit: microsec
    Type: average
    Base: layoutget.total
    restorefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderestorefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelayoutget_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutget_total
    restorefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelayoutget_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutget_total
    restorefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:noderestorefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_layoutget_total

    -

    Total number of LAYOUTGET operations.

    +

    node_nfs_rmdir_avg_latency

    +

    Average latency of RmDir procedure requests. The counter keeps track of the average response time of RmDir requests.

    @@ -39542,32 +44734,46 @@

    node_nfs_layoutget_total

    RESTapi/cluster/counter/tables/svm_nfs_v41:nodelayoutget.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yamlapi/cluster/counter/tables/svm_nfs_v3:nodermdir.average_latency
    Unit: microsec
    Type: average
    Base: rmdir.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodelayoutget.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yamlZAPIperf-object-get-instances nfsv3:nodermdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rmdir_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    +

    node_nfs_rmdir_total

    +

Total number of RmDir procedure requests. It is the total number of RmDir success and RmDir error requests.

    ZAPIperf-object-get-instances nfsv4_1:nodelayoutget_total
    Unit: none
    Type: rate
    Base:
conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
API | Endpoint | Metric | Template
    RESTapi/cluster/counter/tables/svm_nfs_v3:nodermdir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodelayoutget_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances nfsv3:nodermdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_nfs_layoutreturn_avg_latency

    -

    Average latency of LAYOUTRETURN operations.

    +

    node_nfs_savefh_avg_latency

    +

    Average latency of SAVEFH operations.

    @@ -39581,31 +44787,43 @@

    node_nfs_layoutreturn_avg_latency

    REST api/cluster/counter/tables/svm_nfs_v41:nodelayoutreturn.average_latency
    Unit: microsec
    Type: average
    Base: layoutreturn.total
    savefh.average_latency
    Unit: microsec
    Type: average
    Base: savefh.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelayoutreturn.average_latency
    Unit: microsec
    Type: average
    Base: layoutreturn.total
    savefh.average_latency
    Unit: microsec
    Type: average
    Base: savefh.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodesavefh.average_latency
    Unit: microsec
    Type: average
    Base: savefh.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelayoutreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutreturn_total
    savefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: savefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelayoutreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutreturn_total
    savefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: savefh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodesavefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: savefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_layoutreturn_total

    -

    Total number of LAYOUTRETURN operations.

    +

    node_nfs_savefh_total

    +

    Total number of SAVEFH operations.

    @@ -39619,31 +44837,43 @@

    node_nfs_layoutreturn_totalUnit: none
    Type: rate
    Base: +

    savefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelayoutreturn.total
    Unit: none
    Type: rate
    Base:
    savefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodesavefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelayoutreturn_total
    Unit: none
    Type: rate
    Base:
    savefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelayoutreturn_total
    Unit: none
    Type: rate
    Base:
    savefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodesavefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
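The ZAPI rows are gathered through the classic ONTAPI perf-object-get-instances call against the object named in the Endpoint column (here nfsv4:node). The following is only a shape sketch: the servlet path, API version, and any required instance-selection elements are assumptions and differ between releases.

```python
# Sketch (assumptions): shape of a classic ONTAPI (ZAPI) perf request for the
# nfsv4:node object. Real calls need authentication and, on cDOT, usually an
# instance/instance-uuid selection obtained from a prior list-info call.
ZAPI_BODY = """\
<netapp xmlns="http://www.netapp.com/filer/admin" version="1.31">
  <perf-object-get-instances>
    <objectname>nfsv4:node</objectname>
    <counters>
      <counter>savefh_total</counter>
      <counter>savefh_avg_latency</counter>
    </counters>
  </perf-object-get-instances>
</netapp>
"""

# The body would be POSTed to the cluster's ONTAPI servlet; the path below is an
# assumption: https://cluster.example.com/servlets/netapp.servlets.admin.XMLrequest_filer
print(ZAPI_BODY)
```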
    - -

    Average latency of Link procedure requests. The counter keeps track of the average response time of Link requests.

    +

    node_nfs_secinfo_avg_latency

    +

    Average latency of SECINFO operations.

    @@ -39656,56 +44886,44 @@ - - - - + - + - + - - - - - - - + - + - +
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodelink.average_latency
    Unit: microsec
    Type: average
    Base: link.total
    secinfo.average_latency
    Unit: microsec
    Type: average
    Base: secinfo.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelink.average_latency
    Unit: microsec
    Type: average
    Base: link.total
    secinfo.average_latency
    Unit: microsec
    Type: average
    Base: secinfo.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodelink.average_latency
    Unit: microsec
    Type: average
    Base: link.total
    secinfo.average_latency
    Unit: microsec
    Type: average
    Base: secinfo.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodelink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: link_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: link_total
    secinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: link_total
    secinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodelink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: link_total
    secinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    - -

Total number of Link procedure requests. It is the total number of Link success and Link error requests.

    +

    node_nfs_secinfo_no_name_avg_latency

    +

    Average latency of SECINFO_NO_NAME operations.

    @@ -39718,56 +44936,32 @@ - - - - + - + - - - - - - - - - - - - - + - + - - - - - -
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodelink.total
    Unit: none
    Type: rate
    Base:
    secinfo_no_name.average_latency
    Unit: microsec
    Type: average
    Base: secinfo_no_name.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelink.total
    Unit: none
    Type: rate
    Base:
    secinfo_no_name.average_latency
    Unit: microsec
    Type: average
    Base: secinfo_no_name.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodelink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodelink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelink_total
    Unit: none
    Type: rate
    Base:
    secinfo_no_name_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_no_name_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelink_total
    Unit: none
    Type: rate
    Base:
    secinfo_no_name_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_no_name_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodelink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_lock_avg_latency

    -

    Average latency of LOCK operations.

    +

    node_nfs_secinfo_no_name_total

    +

    Total number of SECINFO_NO_NAME operations.

    @@ -39781,43 +44975,31 @@

    node_nfs_lock_avg_latencyUnit: microsec
    Type: average
    Base: lock.total +

    secinfo_no_name.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelock.average_latency
    Unit: microsec
    Type: average
    Base: lock.total
    secinfo_no_name.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodelock.average_latency
    Unit: microsec
    Type: average
    Base: lock.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelock_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lock_total
    secinfo_no_name_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelock_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lock_total
    secinfo_no_name_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodelock_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lock_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_lock_total

    -

    Total number of LOCK operations.

    +

    node_nfs_secinfo_total

    +

    Total number of SECINFO operations.

    @@ -39831,43 +45013,43 @@

    node_nfs_lock_totalUnit: none
    Type: rate
    Base: +

    secinfo.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelock.total
    Unit: none
    Type: rate
    Base:
    secinfo.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodelock.total
    Unit: none
    Type: rate
    Base:
    secinfo.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelock_total
    Unit: none
    Type: rate
    Base:
    secinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelock_total
    Unit: none
    Type: rate
    Base:
    secinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodelock_total
    Unit: none
    Type: rate
    Base:
    secinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_lockt_avg_latency

    -

    Average latency of LOCKT operations.

    +

    node_nfs_sequence_avg_latency

    +

    Average latency of SEQUENCE operations.

    @@ -39881,43 +45063,31 @@

    node_nfs_lockt_avg_latencyUnit: microsec
    Type: average
    Base: lockt.total +

    sequence.average_latency
    Unit: microsec
    Type: average
    Base: sequence.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelockt.average_latency
    Unit: microsec
    Type: average
    Base: lockt.total
    sequence.average_latency
    Unit: microsec
    Type: average
    Base: sequence.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodelockt.average_latency
    Unit: microsec
    Type: average
    Base: lockt.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelockt_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lockt_total
    sequence_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: sequence_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelockt_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lockt_total
    sequence_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: sequence_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodelockt_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lockt_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_lockt_total

    -

    Total number of LOCKT operations.

    +

    node_nfs_sequence_total

    +

    Total number of SEQUENCE operations.

    @@ -39931,43 +45101,31 @@

    node_nfs_lockt_totalUnit: none
    Type: rate
    Base: +

    sequence.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelockt.total
    Unit: none
    Type: rate
    Base:
    sequence.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodelockt.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelockt_total
    Unit: none
    Type: rate
    Base:
    sequence_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelockt_total
    Unit: none
    Type: rate
    Base:
    sequence_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodelockt_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_locku_avg_latency

    -

    Average latency of LOCKU operations.

    +

    node_nfs_set_ssv_avg_latency

    +

    Average latency of SET_SSV operations.

    @@ -39981,43 +45139,31 @@

    node_nfs_locku_avg_latencyUnit: microsec
    Type: average
    Base: locku.total +

    set_ssv.average_latency
    Unit: microsec
    Type: average
    Base: set_ssv.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelocku.average_latency
    Unit: microsec
    Type: average
    Base: locku.total
    set_ssv.average_latency
    Unit: microsec
    Type: average
    Base: set_ssv.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodelocku.average_latency
    Unit: microsec
    Type: average
    Base: locku.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelocku_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: locku_total
    set_ssv_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: set_ssv_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelocku_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: locku_total
    set_ssv_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: set_ssv_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodelocku_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: locku_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_locku_total

    -

    Total number of LOCKU operations.

    +

    node_nfs_set_ssv_total

    +

    Total number of SET_SSV operations.

    @@ -40031,43 +45177,31 @@

    node_nfs_locku_totalUnit: none
    Type: rate
    Base: +

    set_ssv.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelocku.total
    Unit: none
    Type: rate
    Base:
    set_ssv.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodelocku.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelocku_total
    Unit: none
    Type: rate
    Base:
    set_ssv_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelocku_total
    Unit: none
    Type: rate
    Base:
    set_ssv_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodelocku_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_lookup_avg_latency

    -

    Average latency of LookUp procedure requests. This shows the average time it takes for the LookUp operation to reply to the request.

    +

    node_nfs_setattr_avg_latency

    +

    Average latency of SetAttr procedure requests. The counter keeps track of the average response time of SetAttr requests.

    @@ -40081,55 +45215,55 @@

    node_nfs_lookup_avg_latencyUnit: microsec
    Type: average
    Base: lookup.total +

    setattr.average_latency
    Unit: microsec
    Type: average
    Base: setattr.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodelookup.average_latency
    Unit: microsec
    Type: average
    Base: lookup.total
    setattr.average_latency
    Unit: microsec
    Type: average
    Base: setattr.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelookup.average_latency
    Unit: microsec
    Type: average
    Base: lookup.total
    setattr.average_latency
    Unit: microsec
    Type: average
    Base: setattr.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodelookup.average_latency
    Unit: microsec
    Type: average
    Base: lookup.total
    setattr.average_latency
    Unit: microsec
    Type: average
    Base: setattr.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv3:nodelookup_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookup_total
    setattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelookup_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookup_total
    setattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelookup_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookup_total
    setattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodelookup_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookup_total
    setattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
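Because each node reports its own average (already weighted by its own base counter), a cluster-wide setattr latency should be recombined as a weighted average of the per-node values rather than a plain mean. A minimal sketch, assuming per-node (latency, ops) pairs have already been collected:

```python
# Sketch: combine per-node setattr latency into a cluster-wide weighted average.
# The per-node numbers are illustrative.
samples = [
    # (avg latency in microsec over the interval, setattr ops in the interval)
    (250.0, 1_000),
    (400.0,   200),
    (300.0,   800),
]

total_ops = sum(ops for _, ops in samples)
weighted = sum(lat * ops for lat, ops in samples) / total_ops if total_ops else 0.0
print(round(weighted, 1))  # 285.0 microsec, not the unweighted mean of 316.7
```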
    -

    node_nfs_lookup_total

    -

    Total number of Lookup procedure requests. It is the total number of lookup success and lookup error requests.

    +

    node_nfs_setattr_total

    +

    Total number of Setattr procedure requests. It is the total number of Setattr success and setattr error requests.

    @@ -40143,55 +45277,55 @@

    node_nfs_lookup_totalUnit: none
    Type: rate
    Base: +

    setattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodelookup.total
    Unit: none
    Type: rate
    Base:
    setattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodelookup.total
    Unit: none
    Type: rate
    Base:
    setattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodelookup.total
    Unit: none
    Type: rate
    Base:
    setattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv3:nodelookup_total
    Unit: none
    Type: rate
    Base:
    setattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodelookup_total
    Unit: none
    Type: rate
    Base:
    setattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodelookup_total
    Unit: none
    Type: rate
    Base:
    setattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodelookup_total
    Unit: none
    Type: rate
    Base:
    setattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_lookupp_avg_latency

    -

    Average latency of LOOKUPP operations.

    +

    node_nfs_setclientid_avg_latency

    +

    Average latency of SETCLIENTID procedures

    @@ -40204,44 +45338,20 @@

    node_nfs_lookupp_avg_latencyUnit: microsec
    Type: average
    Base: lookupp.total -

    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodelookupp.average_latency
    Unit: microsec
    Type: average
    Base: lookupp.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodelookupp.average_latency
    Unit: microsec
    Type: average
    Base: lookupp.total
    setclientid.average_latency
    Unit: microsec
    Type: average
    Base: setclientid.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodelookupp_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookupp_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodelookupp_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookupp_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodelookupp_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookupp_total
    setclientid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setclientid_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_lookupp_total

    -

    Total number of LOOKUPP operations.

    +

    node_nfs_setclientid_confirm_avg_latency

    +

    Average latency of SETCLIENTID_CONFIRM procedures

    @@ -40254,44 +45364,20 @@

    node_nfs_lookupp_total

    RESTapi/cluster/counter/tables/svm_nfs_v41:nodelookupp.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodelookupp.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodelookupp.total
    Unit: none
    Type: rate
    Base:
    setclientid_confirm.average_latency
    Unit: microsec
    Type: average
    Base: setclientid_confirm.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodelookupp_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodelookupp_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodelookupp_total
    Unit: none
    Type: rate
    Base:
    setclientid_confirm_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setclientid_confirm_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_mkdir_avg_latency

    -

    Average latency of MkDir procedure requests. The counter keeps track of the average response time of MkDir requests.

    +

    node_nfs_setclientid_confirm_total

    +

    Total number of SETCLIENTID_CONFIRM procedures

    @@ -40304,20 +45390,20 @@

    node_nfs_mkdir_avg_latencyUnit: microsec
    Type: average
    Base: mkdir.total -

    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/svm_nfs_v4:nodesetclientid_confirm.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodemkdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: mkdir_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances nfsv4:nodesetclientid_confirm_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_mkdir_total

    -

Total number of MkDir procedure requests. It is the total number of MkDir success and MkDir error requests.

    +

    node_nfs_setclientid_total

    +

    Total number of SETCLIENTID procedures

    @@ -40330,20 +45416,20 @@

    node_nfs_mkdir_totalUnit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/svm_nfs_v4:nodesetclientid.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodemkdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances nfsv4:nodesetclientid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_mknod_avg_latency

    -

    Average latency of MkNod procedure requests. The counter keeps track of the average response time of MkNod requests.

    + +

    Average latency of SymLink procedure requests. The counter keeps track of the average response time of SymLink requests.

    @@ -40357,19 +45443,19 @@

    node_nfs_mknod_avg_latencyUnit: microsec
    Type: average
    Base: mknod.total +

    - +
    symlink.average_latency
    Unit: microsec
    Type: average
    Base: symlink.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv3:nodemknod_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: mknod_total
    symlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: symlink_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_nfs_mknod_total

    -

Total number of MkNod procedure requests. It is the total number of MkNod success and MkNod error requests.

    + +

Total number of SymLink procedure requests. It is the total number of SymLink success and SymLink error requests.

    @@ -40383,19 +45469,19 @@

    node_nfs_mknod_totalUnit: none
    Type: rate
    Base: +

    - +
    symlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv3:nodemknod_total
    Unit: none
    Type: rate
    Base:
    symlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    -

    node_nfs_null_avg_latency

    -

    Average latency of Null procedure requests.

    +

    node_nfs_test_stateid_avg_latency

    +

    Average latency of TEST_STATEID operations.

    @@ -40408,56 +45494,70 @@

    node_nfs_null_avg_latencyUnit: microsec
    Type: average
    Base: null.total -

    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodenull.average_latency
    Unit: microsec
    Type: average
    Base: null.total
    test_stateid.average_latency
    Unit: microsec
    Type: average
    Base: test_stateid.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodenull.average_latency
    Unit: microsec
    Type: average
    Base: null.total
    test_stateid.average_latency
    Unit: microsec
    Type: average
    Base: test_stateid.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodenull.average_latency
    Unit: microsec
    Type: average
    Base: null.total
    conf/restperf/9.12.0/nfsv4_node.yamlZAPIperf-object-get-instances nfsv4_1:nodetest_stateid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: test_stateid_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodenull_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances nfsv4_2:nodetest_stateid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: test_stateid_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    +

    node_nfs_test_stateid_total

    +

    Total number of TEST_STATEID operations.

API | Endpoint | Metric | Template
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodetest_stateid.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodetest_stateid.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodenull_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    test_stateid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodenull_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    test_stateid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodenull_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_null_total

    -

    Total number of Null procedure requests. It is the total of null success and null error requests.

    +

    node_nfs_throughput

    +

    Rate of NFSv3 data transfers per second.

    @@ -40471,55 +45571,55 @@

    node_nfs_null_totalUnit: none
    Type: rate
    Base: +

    throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodenull.total
    Unit: none
    Type: rate
    Base:
    total.throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodenull.total
    Unit: none
    Type: rate
    Base:
    total.throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodenull.total
    Unit: none
    Type: rate
    Base:
    total.throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv3:nodenull_total
    Unit: none
    Type: rate
    Base:
    nfsv3_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodenull_total
    Unit: none
    Type: rate
    Base:
    nfs41_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodenull_total
    Unit: none
    Type: rate
    Base:
    nfs42_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodenull_total
    Unit: none
    Type: rate
    Base:
    nfs4_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
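When Harvest exports these counters to Prometheus, node_nfs_throughput becomes an ordinary time series that can be queried over the Prometheus HTTP API. A sketch under that assumption; the server URL and label names are illustrative:

```python
# Sketch (assumptions): query an assumed Prometheus server that scrapes Harvest
# for the node_nfs_throughput series. Label names depend on your exporter setup.
import requests

PROM = "http://prometheus.example.com:9090"    # hypothetical
query = 'sum by (node) (node_nfs_throughput)'  # b_per_sec, summed per node

r = requests.get(f"{PROM}/api/v1/query", params={"query": query})
r.raise_for_status()
for result in r.json()["data"]["result"]:
    node = result["metric"].get("node", "unknown")
    bytes_per_sec = float(result["value"][1])
    print(f"{node}: {bytes_per_sec / 1e6:.1f} MB/s")
```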
    -

    node_nfs_nverify_avg_latency

    -

    Average latency of NVERIFY operations.

    +

    node_nfs_total_ops

    +

    Total number of NFSv3 procedure requests per second.

    @@ -40532,44 +45632,56 @@

    node_nfs_nverify_avg_latencyUnit: per_sec
    Type: rate
    Base: +

    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodenverify.average_latency
    Unit: microsec
    Type: average
    Base: nverify.total
    total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodenverify.average_latency
    Unit: microsec
    Type: average
    Base: nverify.total
    total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodenverify.average_latency
    Unit: microsec
    Type: average
    Base: nverify.total
    total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodenfsv3_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodenverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: nverify_total
    total_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodenverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: nverify_total
    total_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodenverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: nverify_total
    total_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_nverify_total

    -

    Total number of NVERIFY operations.

    +

    node_nfs_verify_avg_latency

    +

    Average latency of VERIFY operations.

    @@ -40583,43 +45695,43 @@

    node_nfs_nverify_total

    REST api/cluster/counter/tables/svm_nfs_v41:nodenverify.total
    Unit: none
    Type: rate
    Base:
    verify.average_latency
    Unit: microsec
    Type: average
    Base: verify.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodenverify.total
    Unit: none
    Type: rate
    Base:
    verify.average_latency
    Unit: microsec
    Type: average
    Base: verify.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodenverify.total
    Unit: none
    Type: rate
    Base:
    verify.average_latency
    Unit: microsec
    Type: average
    Base: verify.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodenverify_total
    Unit: none
    Type: rate
    Base:
    verify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: verify_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodenverify_total
    Unit: none
    Type: rate
    Base:
    verify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: verify_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodenverify_total
    Unit: none
    Type: rate
    Base:
    verify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: verify_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_open_avg_latency

    -

    Average latency of OPEN operations.

    +

    node_nfs_verify_total

    +

    Total number of VERIFY operations.

    @@ -40633,43 +45745,43 @@

    node_nfs_open_avg_latencyUnit: microsec
    Type: average
    Base: open.total +

    verify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodeopen.average_latency
    Unit: microsec
    Type: average
    Base: open.total
    verify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodeopen.average_latency
    Unit: microsec
    Type: average
    Base: open.total
    verify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodeopen_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_total
    verify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodeopen_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_total
    verify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodeopen_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_total
    verify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_open_confirm_avg_latency

    -

    Average latency of OPEN_CONFIRM procedures

    +

    node_nfs_want_delegation_avg_latency

    +

    Average latency of WANT_DELEGATION operations.

    @@ -40682,20 +45794,32 @@

    node_nfs_open_confirm_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeopen_confirm.average_latency
    Unit: microsec
    Type: average
    Base: open_confirm.total
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodewant_delegation.average_latency
    Unit: microsec
    Type: average
    Base: want_delegation.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodewant_delegation.average_latency
    Unit: microsec
    Type: average
    Base: want_delegation.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeopen_confirm_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_confirm_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nfsv4_1:nodewant_delegation_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: want_delegation_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodewant_delegation_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: want_delegation_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_nfs_open_confirm_total

    -

    Total number of OPEN_CONFIRM procedures

    +

    node_nfs_want_delegation_total

    +

    Total number of WANT_DELEGATION operations.

    @@ -40708,20 +45832,32 @@

    node_nfs_open_confirm_totalUnit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/svm_nfs_v41:nodewant_delegation.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodewant_delegation.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeopen_confirm_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nfsv4_1:nodewant_delegation_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodewant_delegation_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    -

    node_nfs_open_downgrade_avg_latency

    -

    Average latency of OPEN_DOWNGRADE operations.

    +

    node_nfs_write_avg_latency

    +

    Average latency of Write procedure requests. The counter keeps track of the average response time of Write requests.

    @@ -40734,44 +45870,82 @@

    node_nfs_open_downgrade_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v3:nodewrite.average_latency
    Unit: microsec
    Type: average
    Base: write.total
    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodeopen_downgrade.average_latency
    Unit: microsec
    Type: average
    Base: open_downgrade.total
    write.average_latency
    Unit: microsec
    Type: average
    Base: write.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodeopen_downgrade.average_latency
    Unit: microsec
    Type: average
    Base: open_downgrade.total
    write.average_latency
    Unit: microsec
    Type: average
    Base: write.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodeopen_downgrade.average_latency
    Unit: microsec
    Type: average
    Base: open_downgrade.total
    write.average_latency
    Unit: microsec
    Type: average
    Base: write.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodewrite_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodeopen_downgrade_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_downgrade_total
    write_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodeopen_downgrade_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_downgrade_total
    write_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodeopen_downgrade_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_downgrade_total
    write_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
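All latency counters in these tables are reported in microseconds, so thresholds expressed in milliseconds need a conversion first. A tiny sketch (the 20 ms threshold is an arbitrary example, not a recommendation):

```python
# Sketch: flag nodes whose NFS write latency exceeds an example threshold.
# The threshold and sample values are illustrative only.
THRESHOLD_MS = 20.0

def over_threshold(avg_latency_microsec: float) -> bool:
    return (avg_latency_microsec / 1000.0) > THRESHOLD_MS

print(over_threshold(12_500))   # False -> 12.5 ms
print(over_threshold(48_000))   # True  -> 48.0 ms
```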
    -

    node_nfs_open_downgrade_total

    -

    Total number of OPEN_DOWNGRADE operations.

    +

    node_nfs_write_ops

    +

    Total observed NFSv3 write operations per second.

API | Endpoint | Metric | Template
    RESTapi/cluster/counter/tables/svm_nfs_v3:nodewrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodenfsv3_write_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    +

    node_nfs_write_throughput

    +

    Rate of NFSv3 write data transfers per second.

    @@ -40784,44 +45958,56 @@

    node_nfs_open_downgrade_totalUnit: b_per_sec
    Type: rate
    Base: +

    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodeopen_downgrade.total
    Unit: none
    Type: rate
    Base:
    total.write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodeopen_downgrade.total
    Unit: none
    Type: rate
    Base:
    total.write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodeopen_downgrade.total
    Unit: none
    Type: rate
    Base:
    total.write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodenfsv3_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodeopen_downgrade_total
    Unit: none
    Type: rate
    Base:
    nfs41_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodeopen_downgrade_total
    Unit: none
    Type: rate
    Base:
    nfs42_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodeopen_downgrade_total
    Unit: none
    Type: rate
    Base:
    nfs4_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_open_total

    -

    Total number of OPEN operations.

    +

    node_nfs_write_total

    +

    Total number of Write procedure requests. It is the total number of write success and write error requests.

    @@ -40834,44 +46020,56 @@

    node_nfs_open_totalUnit: none
    Type: rate
    Base: +

    conf/restperf/9.12.0/nfsv3_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v41:nodeopen.total
    Unit: none
    Type: rate
    Base:
    write.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v42:nodeopen.total
    Unit: none
    Type: rate
    Base:
    write.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    REST api/cluster/counter/tables/svm_nfs_v4:nodeopen.total
    Unit: none
    Type: rate
    Base:
    write.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodewrite_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPI perf-object-get-instances nfsv4_1:nodeopen_total
    Unit: none
    Type: rate
    Base:
    write_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPI perf-object-get-instances nfsv4_2:nodeopen_total
    Unit: none
    Type: rate
    Base:
    write_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
    ZAPI perf-object-get-instances nfsv4:nodeopen_total
    Unit: none
    Type: rate
    Base:
    write_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
    -

    node_nfs_openattr_avg_latency

    -

    Average latency of OPENATTR operations.

    +

    node_nvme_fc_data_recv

    +

    NVMe/FC kilobytes (KB) received per second

    @@ -40884,44 +46082,54 @@

    node_nfs_openattr_avg_latencyUnit: microsec
    Type: average
    Base: openattr.total -

    conf/restperf/9.12.0/nfsv4_1_node.yamlapi/cluster/counter/tables/system:nodenvme_fc_data_received
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    +

    node_nvme_fc_data_sent

    +

    NVMe/FC kilobytes (KB) sent per second

    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeopenattr.average_latency
    Unit: microsec
    Type: average
    Base: openattr.total
conf/restperf/9.12.0/nfsv4_2_node.yaml
API | Endpoint | Metric | Template
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeopenattr.average_latency
    Unit: microsec
    Type: average
    Base: openattr.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeopenattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: openattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/system:nodenvme_fc_data_sent
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    +

    node_nvme_fc_ops

    +

    NVMe/FC operations per second

    ZAPIperf-object-get-instances nfsv4_2:nodeopenattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: openattr_total
conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
API | Endpoint | Metric | Template
    ZAPIperf-object-get-instances nfsv4:nodeopenattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: openattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlRESTapi/cluster/counter/tables/system:nodenvme_fc_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    -

    node_nfs_openattr_total

    -

    Total number of OPENATTR operations.

    +

    node_nvmf_data_recv

    +

    NVMe/FC kilobytes (KB) received per second.

    @@ -40934,44 +46142,72 @@

    node_nfs_openattr_total

    RESTapi/cluster/counter/tables/svm_nfs_v41:nodeopenattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yamlapi/cluster/counter/tables/system:nodenvme_fc_data_received, 1
    Unit:
    Type:
    Base:
    conf/restperf/9.12.0/system_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeopenattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yamlZAPIperf-object-get-instances system:nodenvmf_data_recv
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    +

    node_nvmf_data_sent

    +

    NVMe/FC kilobytes (KB) sent per second.

API | Endpoint | Metric | Template
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeopenattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/system:nodenvme_fc_data_sent, 1
    Unit:
    Type:
    Base:
    conf/restperf/9.12.0/system_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeopenattr_total
    Unit: none
    Type: rate
    Base:
conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
perf-object-get-instances system:node
nvmf_data_sent
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    +

    node_nvmf_ops

    +

    NVMe/FC operations per second.

    + + + + + + + + + - - - - + + + + - - - + + +
API | Endpoint | Metric | Template
    ZAPIperf-object-get-instances nfsv4_2:nodeopenattr_total
    Unit: none
    Type: rate
    Base:
conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
REST
api/cluster/counter/tables/system:node
nvme_fc_ops, 1
    Unit:
    Type:
    Base:
    conf/restperf/9.12.0/system_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeopenattr_total
    Unit: none
    Type: rate
    Base:
conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
perf-object-get-instances system:node
nvmf_ops
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
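The REST rows in these tables are read from ONTAP's counter-tables endpoint (here `api/cluster/counter/tables/system:node`). Below is a minimal sketch of pulling that table directly, assuming a reachable cluster at `cluster.example.com` and admin credentials (both hypothetical); the `/rows` sub-resource and `fields=counters` query follow the counter-tables REST convention, so verify the exact shape against your ONTAP version's API reference.

```python
import requests

# Hypothetical cluster address and credentials; the table name comes from the
# Endpoint column above. verify=False is only acceptable against a lab cluster
# with self-signed certificates.
BASE = "https://cluster.example.com/api/cluster/counter/tables/system:node"

resp = requests.get(f"{BASE}/rows", params={"fields": "counters"},
                    auth=("admin", "password"), verify=False)
resp.raise_for_status()

for row in resp.json().get("records", []):
    counters = {c["name"]: c.get("value") for c in row.get("counters", [])}
    print(row.get("id"), counters.get("nvme_fc_ops"), counters.get("total_ops"))
```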
    -

    node_nfs_ops

    -

    Number of NFS operations per second

    +

    node_ssd_data_read

    +

    Number of SSD Disk kilobytes (KB) read per second

    @@ -40985,19 +46221,19 @@

    node_nfs_ops

    - + - +
    REST api/cluster/counter/tables/system:nodenfs_ops
    Unit: per_sec
    Type: rate
    Base:
    ssd_data_read
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    ZAPI perf-object-get-instances system:nodenfs_ops
    Unit: per_sec
    Type: rate
    Base:
    ssd_data_read
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    -

    node_nfs_pathconf_avg_latency

    -

    Average latency of PathConf procedure requests. The counter keeps track of the average response time of PathConf requests.

    +

    node_ssd_data_written

    +

    Number of SSD Disk kilobytes (KB) written per second

    @@ -41010,20 +46246,20 @@

node_nfs_pathconf_avg_latency
Unit: microsec
Type: average
Base: pathconf.total
-

    + + + - - - + + +
conf/restperf/9.12.0/nfsv3_node.yaml
api/cluster/counter/tables/system:node
ssd_data_written
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodepathconf_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: pathconf_total
conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
perf-object-get-instances system:node
ssd_data_written
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    -

    node_nfs_pathconf_total

    -

Total number of PathConf procedure requests. It is the total number of PathConf success and PathConf error requests.

    +

    node_total_data

    +

    Total throughput in bytes

    @@ -41036,20 +46272,20 @@

    node_nfs_pathconf_total

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v3:nodepathconf.total
    Unit: none
    Type: rate
    Base:
conf/restperf/9.12.0/nfsv3_node.yaml
api/cluster/counter/tables/system:node
total_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodepathconf_total
    Unit: none
    Type: rate
    Base:
conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
perf-object-get-instances system:node
total_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    -

    node_nfs_putfh_avg_latency

    -

Average latency of PUTFH operations.

    +

    node_total_latency

    +

    Average latency for all operations in the system in microseconds

    @@ -41062,44 +46298,72 @@

    node_nfs_putfh_avg_latencyUnit: none
    Type: delta
    Base: -

    + + + - - - - + + + + + + +
conf/restperf/9.12.0/nfsv4_1_node.yaml
api/cluster/counter/tables/system:node
total_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/restperf/9.12.0/system_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeputfh.average_latency
    Unit: microsec
    Type: average
    Base: putfh.total
conf/restperf/9.12.0/nfsv4_2_node.yaml
ZAPI
perf-object-get-instances system:node
total_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    +

    node_total_ops

    +

    Total number of operations per second

    + + + + + + + + + - - - + + + - - - + + + + + +
API | Endpoint | Metric | Template
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeputfh.average_latency
    Unit: microsec
    Type: average
    Base: putfh.total
conf/restperf/9.12.0/nfsv4_node.yaml
api/cluster/counter/tables/system:node
total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeputfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putfh_total
conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
perf-object-get-instances system:node
total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yaml
    +

    node_uptime

    +

    The total time, in seconds, that the node has been up.

    + + + + + + + + + - - - - + + + + - - - + + +
API | Endpoint | Metric | Template
    ZAPIperf-object-get-instances nfsv4_2:nodeputfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putfh_total
conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
REST
api/cluster/nodes
uptime
conf/rest/9.12.0/node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeputfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putfh_total
conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
system-node-get-iter
node-details-info.node-uptime
conf/zapi/cdot/9.8.0/node.yaml
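node_uptime is a configuration metric rather than a performance counter: the REST template reads the uptime field of api/cluster/nodes, while the ZAPI template reads node-details-info.node-uptime from system-node-get-iter. A minimal sketch of the REST side, again with a hypothetical cluster address and credentials:

```python
import requests

resp = requests.get("https://cluster.example.com/api/cluster/nodes",
                    params={"fields": "uptime"},
                    auth=("admin", "password"), verify=False)
resp.raise_for_status()

for node in resp.json().get("records", []):
    # uptime is reported in seconds
    print(node.get("name"), node.get("uptime"))
```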
    -

    node_nfs_putfh_total

    -

    Total number of PUTFH operations.

    +

    node_vol_cifs_other_latency

    +

    Average time for the WAFL filesystem to process other CIFS operations to the volume; not including CIFS protocol request processing or network communication time which will also be included in client observed CIFS request latency

    @@ -41112,44 +46376,72 @@

    node_nfs_putfh_totalUnit: none
    Type: rate
    Base: -

    + + + - - - - + + + + + + +
    conf/restperf/9.12.0/nfsv4_1_node.yamlapi/cluster/counter/tables/volume:nodecifs.other_latency
    Unit: microsec
    Type: average
    Base: cifs.other_ops
    conf/restperf/9.12.0/volume_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeputfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yamlZAPIperf-object-get-instances volume:nodecifs_other_latency
    Unit: microsec
    Type: average
    Base: cifs_other_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    +

    node_vol_cifs_other_ops

    +

    Number of other CIFS operations per second to the volume

    + + + + + + + + + - - - + + + - - - + + + + +
API | Endpoint | Metric | Template
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeputfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/volume:nodecifs.other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeputfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlperf-object-get-instances volume:nodecifs_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    +

    node_vol_cifs_read_data

    +

    Bytes read per second via CIFS

    + + - - - - + + + + + + + + + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_2:nodeputfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/volume:nodecifs.read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeputfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volume:nodecifs_read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_putpubfh_avg_latency

    -

    Average latency of PUTPUBFH operations.

    +

    node_vol_cifs_read_latency

    +

    Average time for the WAFL filesystem to process CIFS read requests to the volume; not including CIFS protocol request processing or network communication time which will also be included in client observed CIFS request latency

    @@ -41162,44 +46454,20 @@

    node_nfs_putpubfh_avg_latencyUnit: microsec
    Type: average
    Base: putpubfh.total -

    - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeputpubfh.average_latency
    Unit: microsec
    Type: average
    Base: putpubfh.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeputpubfh.average_latency
    Unit: microsec
    Type: average
    Base: putpubfh.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeputpubfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putpubfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeputpubfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putpubfh_total
conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml
api/cluster/counter/tables/volume:node
cifs.read_latency
    Unit: microsec
    Type: average
    Base: cifs.read_ops
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeputpubfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putpubfh_total
conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml
perf-object-get-instances volume:node
cifs_read_latency
    Unit: microsec
    Type: average
    Base: cifs_read_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
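Like the other latency counters on this page, node_vol_cifs_read_latency is an average: the change in cifs.read_latency between two polls divided by the change in its base counter cifs.read_ops. A worked example with hypothetical raw samples:

```python
# Hypothetical raw counter samples from two consecutive polls of volume:node.
prev = {"cifs.read_latency": 40_000_000, "cifs.read_ops": 90_000}
curr = {"cifs.read_latency": 45_000_000, "cifs.read_ops": 100_000}

latency_delta = curr["cifs.read_latency"] - prev["cifs.read_latency"]  # microsec
ops_delta = curr["cifs.read_ops"] - prev["cifs.read_ops"]              # operations

avg_read_latency = latency_delta / ops_delta if ops_delta else 0.0
print(avg_read_latency)  # 500.0 microseconds per CIFS read
```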
    -

    node_nfs_putpubfh_total

    -

    Total number of PUTPUBFH operations.

    +

    node_vol_cifs_read_ops

    +

    Number of CIFS read operations per second from the volume

    @@ -41212,44 +46480,20 @@

    node_nfs_putpubfh_total

    - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodeputpubfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeputpubfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeputpubfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeputpubfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeputpubfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/volume:nodecifs.read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeputpubfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volume:nodecifs_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_putrootfh_avg_latency

    -

    Average latency of PUTROOTFH operations.

    +

    node_vol_cifs_write_data

    +

    Bytes written per second via CIFS

    @@ -41262,44 +46506,20 @@

    node_nfs_putrootfh_avg_latencyUnit: microsec
    Type: average
    Base: putrootfh.total -

    - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeputrootfh.average_latency
    Unit: microsec
    Type: average
    Base: putrootfh.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeputrootfh.average_latency
    Unit: microsec
    Type: average
    Base: putrootfh.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeputrootfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putrootfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeputrootfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putrootfh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/volume:nodecifs.write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeputrootfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putrootfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volume:nodecifs_write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_putrootfh_total

    -

    Total number of PUTROOTFH operations.

    +

    node_vol_cifs_write_latency

    +

    Average time for the WAFL filesystem to process CIFS write requests to the volume; not including CIFS protocol request processing or network communication time which will also be included in client observed CIFS request latency

    @@ -41312,44 +46532,20 @@

    node_nfs_putrootfh_total

    - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodeputrootfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeputrootfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeputrootfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeputrootfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeputrootfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/volume:nodecifs.write_latency
    Unit: microsec
    Type: average
    Base: cifs.write_ops
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodeputrootfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volume:nodecifs_write_latency
    Unit: microsec
    Type: average
    Base: cifs_write_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_read_avg_latency

    -

    Average latency of Read procedure requests. The counter keeps track of the average response time of Read requests.

    +

    node_vol_cifs_write_ops

    +

    Number of CIFS write operations per second to the volume

    @@ -41362,56 +46558,72 @@

    node_nfs_read_avg_latencyUnit: microsec
    Type: average
    Base: read.total -

    + + + - - - - + + + + + +
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volume:nodecifs.write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:noderead.average_latency
    Unit: microsec
    Type: average
    Base: read.total
    conf/restperf/9.12.0/nfsv4_1_node.yamlZAPIperf-object-get-instances volume:nodecifs_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    +

    node_vol_fcp_other_latency

    +

    Average time for the WAFL filesystem to process other FCP protocol operations to the volume; not including FCP protocol request processing or network communication time which will also be included in client observed FCP protocol request latency

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v42:noderead.average_latency
    Unit: microsec
    Type: average
    Base: read.total
    conf/restperf/9.12.0/nfsv4_2_node.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderead.average_latency
    Unit: microsec
    Type: average
    Base: read.total
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/volume:nodefcp.other_latency
    Unit: microsec
    Type: average
    Base: fcp.other_ops
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv3:noderead_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volume:nodefcp_other_latency
    Unit: microsec
    Type: average
    Base: fcp_other_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    +

    node_vol_fcp_other_ops

    +

    Number of other block protocol operations per second to the volume

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1:noderead_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_2:noderead_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlRESTapi/cluster/counter/tables/volume:nodefcp.other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4:noderead_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volume:nodefcp_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_read_ops

    -

    Total observed NFSv3 read operations per second.

    +

    node_vol_fcp_read_data

    +

    Bytes read per second via block protocol

    @@ -41424,20 +46636,20 @@

    node_nfs_read_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volume:nodefcp.read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodenfsv3_read_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volume:nodefcp_read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    - -

    Average latency of ReadSymLink procedure requests. The counter keeps track of the average response time of ReadSymLink requests.

    +

    node_vol_fcp_read_latency

    +

    Average time for the WAFL filesystem to process FCP protocol read operations to the volume; not including FCP protocol request processing or network communication time which will also be included in client observed FCP protocol request latency

    @@ -41450,20 +46662,20 @@ - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v3:noderead_symlink.average_latency
    Unit: microsec
    Type: average
    Base: read_symlink.total
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volume:nodefcp.read_latency
    Unit: microsec
    Type: average
    Base: fcp.read_ops
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv3:noderead_symlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_symlink_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volume:nodefcp_read_latency
    Unit: microsec
    Type: average
    Base: fcp_read_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    - -

    Total number of ReadSymLink procedure requests. It is the total number of read symlink success and read symlink error requests.

    +

    node_vol_fcp_read_ops

    +

    Number of block protocol read operations per second from the volume

    @@ -41476,20 +46688,20 @@ + + + - - - + + +
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volume:nodefcp.read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv3:noderead_symlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volume:nodefcp_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_read_throughput

    -

    Rate of NFSv3 read data transfers per second.

    +

    node_vol_fcp_write_data

    +

    Bytes written per second via block protocol

    @@ -41502,56 +46714,72 @@

    node_nfs_read_throughput

    - - - + + + - - - - + + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v3:noderead_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volume:nodefcp.write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodetotal.read_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yamlZAPIperf-object-get-instances volume:nodefcp_write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    +

    node_vol_fcp_write_latency

    +

    Average time for the WAFL filesystem to process FCP protocol write operations to the volume; not including FCP protocol request processing or network communication time which will also be included in client observed FCP protocol request latency

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodetotal.read_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodetotal.read_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/volume:nodefcp.write_latency
    Unit: microsec
    Type: average
    Base: fcp.write_ops
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodenfsv3_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volume:nodefcp_write_latency
    Unit: microsec
    Type: average
    Base: fcp_write_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    +

    node_vol_fcp_write_ops

    +

    Number of block protocol write operations per second to the volume

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1:nodenfs41_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_2:nodenfs42_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlRESTapi/cluster/counter/tables/volume:nodefcp.write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodenfs4_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volume:nodefcp_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_read_total

    -

Total number of Read procedure requests. It is the total number of read success and read error requests.

    +

    node_vol_iscsi_other_latency

    +

    Average time for the WAFL filesystem to process other iSCSI protocol operations to the volume; not including iSCSI protocol request processing or network communication time which will also be included in client observed iSCSI protocol request latency

    @@ -41564,56 +46792,72 @@

    node_nfs_read_totalUnit: none
    Type: rate
    Base: -

    + + + - - - - + + + + + +
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volume:nodeiscsi.other_latency
    Unit: microsec
    Type: average
    Base: iscsi.other_ops
    conf/restperf/9.12.0/volume_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:noderead.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yamlZAPIperf-object-get-instances volume:nodeiscsi_other_latency
    Unit: microsec
    Type: average
    Base: iscsi_other_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    +

    node_vol_iscsi_other_ops

    +

    Number of other block protocol operations per second to the volume

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v42:noderead.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderead.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/volume:nodeiscsi.other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv3:noderead_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volume:nodeiscsi_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    +

    node_vol_iscsi_read_data

    +

    Bytes read per second via block protocol

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1:noderead_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_2:noderead_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlRESTapi/cluster/counter/tables/volume:nodeiscsi.read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4:noderead_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volume:nodeiscsi_read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_readdir_avg_latency

    -

    Average latency of ReadDir procedure requests. The counter keeps track of the average response time of ReadDir requests.

    +

    node_vol_iscsi_read_latency

    +

    Average time for the WAFL filesystem to process iSCSI protocol read operations to the volume; not including iSCSI protocol request processing or network communication time which will also be included in client observed iSCSI protocol request latency

    @@ -41626,56 +46870,72 @@

    node_nfs_readdir_avg_latencyUnit: microsec
    Type: average
    Base: readdir.total -

    + + + - - - - + + + + + +
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volume:nodeiscsi.read_latency
    Unit: microsec
    Type: average
    Base: iscsi.read_ops
    conf/restperf/9.12.0/volume_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodereaddir.average_latency
    Unit: microsec
    Type: average
    Base: readdir.total
    conf/restperf/9.12.0/nfsv4_1_node.yamlZAPIperf-object-get-instances volume:nodeiscsi_read_latency
    Unit: microsec
    Type: average
    Base: iscsi_read_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    +

    node_vol_iscsi_read_ops

    +

    Number of block protocol read operations per second from the volume

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodereaddir.average_latency
    Unit: microsec
    Type: average
    Base: readdir.total
    conf/restperf/9.12.0/nfsv4_2_node.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodereaddir.average_latency
    Unit: microsec
    Type: average
    Base: readdir.total
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/volume:nodeiscsi.read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodereaddir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volume:nodeiscsi_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    +

    node_vol_iscsi_write_data

    +

    Bytes written per second via block protocol

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1:nodereaddir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_2:nodereaddir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlRESTapi/cluster/counter/tables/volume:nodeiscsi.write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodereaddir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volume:nodeiscsi_write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_readdir_total

    -

Total number of ReadDir procedure requests. It is the total number of ReadDir success and ReadDir error requests.

    +

    node_vol_iscsi_write_latency

    +

    Average time for the WAFL filesystem to process iSCSI protocol write operations to the volume; not including iSCSI protocol request processing or network communication time which will also be included in client observed iSCSI request latency

    @@ -41688,56 +46948,72 @@

    node_nfs_readdir_total

    - - - + + + - - - - + + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v3:nodereaddir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volume:nodeiscsi.write_latency
    Unit: microsec
    Type: average
    Base: iscsi.write_ops
    conf/restperf/9.12.0/volume_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodereaddir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yamlZAPIperf-object-get-instances volume:nodeiscsi_write_latency
    Unit: microsec
    Type: average
    Base: iscsi_write_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    +

    node_vol_iscsi_write_ops

    +

    Number of block protocol write operations per second to the volume

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodereaddir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodereaddir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/volume:nodeiscsi.write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodereaddir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volume:nodeiscsi_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    +

    node_vol_nfs_other_latency

    +

    Average time for the WAFL filesystem to process other NFS operations to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1:nodereaddir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_2:nodereaddir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlRESTapi/cluster/counter/tables/volume:nodenfs.other_latency
    Unit: microsec
    Type: average
    Base: nfs.other_ops
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodereaddir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volume:nodenfs_other_latency
    Unit: microsec
    Type: average
    Base: nfs_other_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_readdirplus_avg_latency

    -

    Average latency of ReadDirPlus procedure requests. The counter keeps track of the average response time of ReadDirPlus requests.

    +

    node_vol_nfs_other_ops

    +

    Number of other NFS operations per second to the volume

    @@ -41750,20 +47026,20 @@

    node_nfs_readdirplus_avg_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v3:nodereaddirplus.average_latency
    Unit: microsec
    Type: average
    Base: readdirplus.total
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volume:nodenfs.other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodereaddirplus_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdirplus_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volume:nodenfs_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_readdirplus_total

    -

Total number of ReadDirPlus procedure requests. It is the total number of ReadDirPlus success and ReadDirPlus error requests.

    +

    node_vol_nfs_read_data

    +

    Bytes read per second via NFS

    @@ -41776,20 +47052,20 @@

    node_nfs_readdirplus_totalUnit: none
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volume:nodenfs.read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodereaddirplus_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volume:nodenfs_read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    - -

    Average latency of READLINK operations.

    +

    node_vol_nfs_read_latency

    +

    Average time for the WAFL filesystem to process NFS protocol read requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency

    @@ -41802,44 +47078,20 @@ - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodereadlink.average_latency
    Unit: microsec
    Type: average
    Base: readlink.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodereadlink.average_latency
    Unit: microsec
    Type: average
    Base: readlink.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodereadlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readlink_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodereadlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readlink_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/volume:nodenfs.read_latency
    Unit: microsec
    Type: average
    Base: nfs.read_ops
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodereadlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readlink_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volume:nodenfs_read_latency
    Unit: microsec
    Type: average
    Base: nfs_read_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    - -

    Total number of READLINK operations.

    +

    node_vol_nfs_read_ops

    +

    Number of NFS read operations per second from the volume

    @@ -41852,44 +47104,46 @@ - - - - - - - - - + + + - - - - + + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodereadlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodereadlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yamlapi/cluster/counter/tables/volume:nodenfs.read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodereadlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yamlZAPIperf-object-get-instances volume:nodenfs_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    +

    node_vol_nfs_write_data

    +

    Bytes written per second via NFS

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1:nodereadlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_2:nodereadlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlRESTapi/cluster/counter/tables/volume:nodenfs.write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4:nodereadlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volume:nodenfs_write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_reclaim_complete_avg_latency

    -

    Average latency of RECLAIM_COMPLETE operations.

    +

    node_vol_nfs_write_latency

    +

    Average time for the WAFL filesystem to process NFS protocol write requests to the volume; not including NFS protocol request processing or network communication time, which will also be included in client observed NFS request latency

    @@ -41902,32 +47156,20 @@

node_nfs_reclaim_complete_avg_latency

    - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodereclaim_complete.average_latency
    Unit: microsec
    Type: average
    Base: reclaim_complete.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodereclaim_complete.average_latency
    Unit: microsec
    Type: average
    Base: reclaim_complete.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodereclaim_complete_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: reclaim_complete_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/volume:nodenfs.write_latency
    Unit: microsec
    Type: average
    Base: nfs.write_ops
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodereclaim_complete_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: reclaim_complete_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances volume:nodenfs_write_latency
    Unit: microsec
    Type: average
    Base: nfs_write_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_reclaim_complete_total

    -

    Total number of RECLAIM_COMPLETE operations.

    +

    node_vol_nfs_write_ops

    +

    Number of NFS write operations per second to the volume

    @@ -41940,32 +47182,20 @@

    node_nfs_reclaim_complete_total

    - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodereclaim_complete.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodereclaim_complete.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodereclaim_complete_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/volume:nodenfs.write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodereclaim_complete_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances volume:nodenfs_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_release_lock_owner_avg_latency

    -

Average latency of RELEASE_LOCKOWNER procedures

    +

    node_vol_read_latency

    +

    Average latency in microseconds for the WAFL filesystem to process read request to the volume; not including request processing or network communication time

    @@ -41978,20 +47208,20 @@

node_nfs_release_lock_owner_avg_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderelease_lock_owner.average_latency
    Unit: microsec
    Type: average
    Base: release_lock_owner.total
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/volume:noderead_latency
    Unit: microsec
    Type: average
    Base: total_read_ops
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4:noderelease_lock_owner_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: release_lock_owner_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volume:noderead_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_release_lock_owner_total

    -

    Total number of RELEASE_LOCKOWNER procedures

    +

    node_vol_write_latency

    +

    Average latency in microseconds for the WAFL filesystem to process write request to the volume; not including request processing or network communication time

    @@ -42004,20 +47234,20 @@

    node_nfs_release_lock_owner_total

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderelease_lock_owner.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/volume:nodewrite_latency
    Unit: microsec
    Type: average
    Base: total_write_ops
    conf/restperf/9.12.0/volume_node.yaml
    ZAPIperf-object-get-instances nfsv4:noderelease_lock_owner_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volume:nodewrite_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yaml
    -

    node_nfs_remove_avg_latency

    -

    Average latency of Remove procedure requests. The counter keeps track of the average response time of Remove requests.

    +

    node_volume_avg_latency

    +

    Average latency in microseconds for the WAFL filesystem to process all the operations on the volume; not including request processing or network communication time. node_volume_avg_latency is volume_avg_latency aggregated by node.

    @@ -42030,56 +47260,72 @@

    node_nfs_remove_avg_latencyUnit: microsec
    Type: average
    Base: remove.total -

    + + + - - - - + + + + + +
conf/restperf/9.12.0/nfsv3_node.yaml
api/cluster/counter/tables/volume
average_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/restperf/9.12.0/volume.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:noderemove.average_latency
    Unit: microsec
    Type: average
    Base: remove.total
conf/restperf/9.12.0/nfsv4_1_node.yaml
ZAPI
perf-object-get-instances volume
avg_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/zapiperf/cdot/9.8.0/volume.yaml
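node_volume_avg_latency is described above as volume_avg_latency aggregated by node. Latencies cannot simply be summed across volumes; a node-level average has to be weighted by each volume's operation count (its Base counter). The sketch below illustrates that principle with hypothetical per-volume values; it is not Harvest's aggregation code.

```python
from collections import defaultdict

# Hypothetical per-volume samples: (node, avg_latency_us, total_ops)
volumes = [
    ("node-01", 400.0, 20_000),
    ("node-01", 250.0, 5_000),
    ("node-02", 900.0, 1_000),
]

weighted = defaultdict(float)  # sum of latency * ops per node
ops = defaultdict(float)       # sum of ops per node
for node, latency, op_count in volumes:
    weighted[node] += latency * op_count
    ops[node] += op_count

for node in sorted(weighted):
    # ops-weighted average latency per node, in microseconds
    print(node, weighted[node] / ops[node])
```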
    +

    node_volume_nfs_access_latency

    +

    Average time for the WAFL filesystem to process NFS protocol access requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_access_latency is volume_nfs_access_latency aggregated by node.

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v42:noderemove.average_latency
    Unit: microsec
    Type: average
    Base: remove.total
    conf/restperf/9.12.0/nfsv4_2_node.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderemove.average_latency
    Unit: microsec
    Type: average
    Base: remove.total
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/volumenfs.access_latency
    Unit: microsec
    Type: average
    Base: nfs.access_ops
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv3:noderemove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volumenfs_access_latency
    Unit: microsec
    Type: average
    Base: nfs_access_ops
    conf/zapiperf/cdot/9.8.0/volume.yaml
    +

    node_volume_nfs_access_ops

    +

    Number of NFS accesses per second to the volume. node_volume_nfs_access_ops is volume_nfs_access_ops aggregated by node.

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1:noderemove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_2:noderemove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlRESTapi/cluster/counter/tables/volumenfs.access_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4:noderemove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volumenfs_access_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
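For per-second counters such as node_volume_nfs_access_ops, aggregating by node is naturally a plain sum of the per-volume values that carry the same node label, unlike the ops-weighted average shown earlier for latency. A short sketch with hypothetical values:

```python
from collections import defaultdict

# Hypothetical per-volume samples: (node, nfs_access_ops per second)
samples = [("node-01", 120.0), ("node-01", 80.0), ("node-02", 15.0)]

per_node = defaultdict(float)
for node, access_ops in samples:
    per_node[node] += access_ops

print(dict(per_node))  # {'node-01': 200.0, 'node-02': 15.0}
```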
    -

    node_nfs_remove_total

    -

Total number of Remove procedure requests. It is the total number of Remove success and Remove error requests.

    +

    node_volume_nfs_getattr_latency

    +

    Average time for the WAFL filesystem to process NFS protocol getattr requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_getattr_latency is volume_nfs_getattr_latency aggregated by node.

    @@ -42092,56 +47338,72 @@

    node_nfs_remove_totalUnit: none
    Type: rate
    Base: -

    + + + - - - - + + + + + +
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volumenfs.getattr_latency
    Unit: microsec
    Type: average
    Base: nfs.getattr_ops
    conf/restperf/9.12.0/volume.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:noderemove.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yamlZAPIperf-object-get-instances volumenfs_getattr_latency
    Unit: microsec
    Type: average
    Base: nfs_getattr_ops
    conf/zapiperf/cdot/9.8.0/volume.yaml
    +

    node_volume_nfs_getattr_ops

    +

    Number of NFS getattr per second to the volume. node_volume_nfs_getattr_ops is volume_nfs_getattr_ops aggregated by node.

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v42:noderemove.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderemove.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/volumenfs.getattr_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv3:noderemove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volumenfs_getattr_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
    +

    node_volume_nfs_lookup_latency

    +

    Average time for the WAFL filesystem to process NFS protocol lookup requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_lookup_latency is volume_nfs_lookup_latency aggregated by node.

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1:noderemove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_2:noderemove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlRESTapi/cluster/counter/tables/volumenfs.lookup_latency
    Unit: microsec
    Type: average
    Base: nfs.lookup_ops
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4:noderemove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volumenfs_lookup_latency
    Unit: microsec
    Type: average
    Base: nfs_lookup_ops
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_rename_avg_latency

    -

    Average latency of Rename procedure requests. The counter keeps track of the average response time of Rename requests.

    +

    node_volume_nfs_lookup_ops

    +

    Number of NFS lookups per second to the volume. node_volume_nfs_lookup_ops is volume_nfs_lookup_ops aggregated by node.

    @@ -42154,56 +47416,72 @@

    node_nfs_rename_avg_latencyUnit: microsec
    Type: average
    Base: rename.total -

    + + + - - - - + + + + + +
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volumenfs.lookup_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:noderename.average_latency
    Unit: microsec
    Type: average
    Base: rename.total
    conf/restperf/9.12.0/nfsv4_1_node.yamlZAPIperf-object-get-instances volumenfs_lookup_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
    +

    node_volume_nfs_other_latency

    +

    Average time for the WAFL filesystem to process other NFS operations to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_other_latency is volume_nfs_other_latency aggregated by node.

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v42:noderename.average_latency
    Unit: microsec
    Type: average
    Base: rename.total
    conf/restperf/9.12.0/nfsv4_2_node.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderename.average_latency
    Unit: microsec
    Type: average
    Base: rename.total
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/volumenfs.other_latency
    Unit: microsec
    Type: average
    Base: nfs.other_ops
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv3:noderename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volumenfs_other_latency
    Unit: microsec
    Type: average
    Base: nfs_other_ops
    conf/zapiperf/cdot/9.8.0/volume.yaml
    +

    node_volume_nfs_other_ops

    +

    Number of other NFS operations per second to the volume. node_volume_nfs_other_ops is volume_nfs_other_ops aggregated by node.

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1:noderename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_2:noderename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlRESTapi/cluster/counter/tables/volumenfs.other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4:noderename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volumenfs_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_rename_total

    -

Total number of Rename procedure requests. It is the total number of Rename success and Rename error requests.

    +

    node_volume_nfs_punch_hole_latency

    +

    Average time for the WAFL filesystem to process NFS protocol hole-punch requests to the volume. node_volume_nfs_punch_hole_latency is volume_nfs_punch_hole_latency aggregated by node.

    @@ -42216,56 +47494,72 @@

    node_nfs_rename_totalUnit: none
    Type: rate
    Base: -

    + + + - - - - + + + + + +
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volumenfs.punch_hole_latency
    Unit: microsec
    Type: average
    Base: nfs.punch_hole_ops
    conf/restperf/9.12.0/volume.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:noderename.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yamlZAPIperf-object-get-instances volumenfs_punch_hole_latency
    Unit: microsec
    Type: average
    Base: nfs_punch_hole_ops
    conf/zapiperf/cdot/9.8.0/volume.yaml
    +

    node_volume_nfs_punch_hole_ops

    +

    Number of NFS hole-punch requests per second to the volume. node_volume_nfs_punch_hole_ops is volume_nfs_punch_hole_ops aggregated by node.

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v42:noderename.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderename.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/volumenfs.punch_hole_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv3:noderename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volumenfs_punch_hole_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
    +

    node_volume_nfs_read_latency

    +

    Average time for the WAFL filesystem to process NFS protocol read requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_read_latency is volume_nfs_read_latency aggregated by node.

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1:noderename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_2:noderename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlRESTapi/cluster/counter/tables/volumenfs.read_latency
    Unit: microsec
    Type: average
    Base: nfs.read_ops
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4:noderename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volumenfs_read_latency
    Unit: microsec
    Type: average
    Base: nfs_read_ops
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_renew_avg_latency

    -

    Average latency of RENEW procedures

    +

    node_volume_nfs_read_ops

    +

    Number of NFS read operations per second from the volume. node_volume_nfs_read_ops is volume_nfs_read_ops aggregated by node.

    @@ -42278,20 +47572,20 @@

    node_nfs_renew_avg_latencyUnit: microsec
    Type: average
    Base: renew.total -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/volumenfs.read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4:noderenew_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: renew_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volumenfs_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_renew_total

    -

    Total number of RENEW procedures

    +

    node_volume_nfs_setattr_latency

    +

    Average time for the WAFL filesystem to process NFS protocol setattr requests to the volume. node_volume_nfs_setattr_latency is volume_nfs_setattr_latency aggregated by node.

    @@ -42304,20 +47598,20 @@

    node_nfs_renew_totalUnit: none
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/volumenfs.setattr_latency
    Unit: microsec
    Type: average
    Base: nfs.setattr_ops
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4:noderenew_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volumenfs_setattr_latency
    Unit: microsec
    Type: average
    Base: nfs_setattr_ops
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_restorefh_avg_latency

    -

    Average latency of RESTOREFH operations.

    +

    node_volume_nfs_setattr_ops

    +

    Number of NFS setattr requests per second to the volume. node_volume_nfs_setattr_ops is volume_nfs_setattr_ops aggregated by node.

    @@ -42330,44 +47624,20 @@

    node_nfs_restorefh_avg_latencyUnit: microsec
    Type: average
    Base: restorefh.total -

    - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:noderestorefh.average_latency
    Unit: microsec
    Type: average
    Base: restorefh.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderestorefh.average_latency
    Unit: microsec
    Type: average
    Base: restorefh.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:noderestorefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: restorefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:noderestorefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: restorefh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/volumenfs.setattr_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4:noderestorefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: restorefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volumenfs_setattr_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_restorefh_total

    -

    Total number of RESTOREFH operations.

    +

    node_volume_nfs_total_ops

    +

    Number of total NFS operations per second to the volume. node_volume_nfs_total_ops is volume_nfs_total_ops aggregated by node.

    @@ -42380,44 +47650,20 @@

    node_nfs_restorefh_total

    - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:noderestorefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:noderestorefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:noderestorefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:noderestorefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:noderestorefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/volumenfs.total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4:noderestorefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volumenfs_total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_rmdir_avg_latency

    -

    Average latency of RmDir procedure requests. The counter keeps track of the average response time of RmDir requests.

    +

    node_volume_nfs_write_latency

    +

    Average time for the WAFL filesystem to process NFS protocol write requests to the volume; not including NFS protocol request processing or network communication time, which will also be included in client observed NFS request latency. node_volume_nfs_write_latency is volume_nfs_write_latency aggregated by node.

    @@ -42430,20 +47676,20 @@

    node_nfs_rmdir_avg_latencyUnit: microsec
    Type: average
    Base: rmdir.total -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volumenfs.write_latency
    Unit: microsec
    Type: average
    Base: nfs.write_ops
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv3:nodermdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rmdir_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volumenfs_write_latency
    Unit: microsec
    Type: average
    Base: nfs_write_ops
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_rmdir_total

    -

Total number of RmDir procedure requests. It is the total number of RmDir success and RmDir error requests.

    +

    node_volume_nfs_write_ops

    +

    Number of NFS write operations per second to the volume. node_volume_nfs_write_ops is volume_nfs_write_ops aggregated by node.

    @@ -42456,20 +47702,20 @@

    node_nfs_rmdir_totalUnit: none
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/volumenfs.write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv3:nodermdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances volumenfs_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_savefh_avg_latency

    -

    Average latency of SAVEFH operations.

    +

    node_volume_other_latency

    +

    Average latency in microseconds for the WAFL filesystem to process other operations to the volume; not including request processing or network communication time. node_volume_other_latency is volume_other_latency aggregated by node.
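
Throughout these tables, a counter of Type: average is paired with a Base counter: the published value is the change in the latency counter divided by the change in its base (ops) counter between two polls. The sketch below illustrates that calculation; it assumes both raw counters are monotonically increasing totals and is not a statement about Harvest's internal implementation.

```python
# Illustrative only: how an "average" counter such as other_latency
# (Base: total_other_ops) is typically derived from two consecutive polls.

def average_counter(latency_prev: int, latency_curr: int,
                    base_prev: int, base_curr: int) -> float:
    """Return average latency (microsec per op) over the polling interval."""
    ops = base_curr - base_prev
    if ops <= 0:
        return 0.0
    return (latency_curr - latency_prev) / ops

# Example: 5,000,000 microsec of accumulated latency across 2,500 new ops
print(average_counter(10_000_000, 15_000_000, 7_500, 10_000))  # -> 2000.0
```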

    @@ -42482,44 +47728,20 @@

    node_nfs_savefh_avg_latencyUnit: microsec
    Type: average
    Base: savefh.total -

    - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodesavefh.average_latency
    Unit: microsec
    Type: average
    Base: savefh.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodesavefh.average_latency
    Unit: microsec
    Type: average
    Base: savefh.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodesavefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: savefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodesavefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: savefh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/volumeother_latency
    Unit: microsec
    Type: average
    Base: total_other_ops
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4:nodesavefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: savefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volumeother_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_savefh_total

    -

    Total number of SAVEFH operations.

    +

    node_volume_other_ops

    +

    Number of other operations per second to the volume. node_volume_other_ops is volume_other_ops aggregated by node.

    @@ -42532,44 +47754,20 @@

    node_nfs_savefh_totalUnit: none
    Type: rate
    Base: -

    - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodesavefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodesavefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodesavefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodesavefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/volumetotal_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4:nodesavefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volumeother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_secinfo_avg_latency

    -

    Average latency of SECINFO operations.

    +

    node_volume_read_data

    +

    Bytes read per second. node_volume_read_data is volume_read_data aggregated by node.
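
Counters of Type: rate (units such as per_sec or b_per_sec) are likewise derived from a cumulative raw counter: the delta between two polls divided by the elapsed time. A small sketch of that conversion, with the raw-counter assumption stated in the comments.

```python
# Illustrative only: turning a cumulative byte counter into b_per_sec.
# Assumes the raw counter is a monotonically increasing total of bytes read.

def rate_counter(prev_value: int, curr_value: int, elapsed_seconds: float) -> float:
    """Return the per-second rate over the polling interval."""
    if elapsed_seconds <= 0:
        return 0.0
    return (curr_value - prev_value) / elapsed_seconds

# Example: 60 MiB read during a 60-second poll interval -> 1 MiB/s
print(rate_counter(1_000_000_000, 1_000_000_000 + 60 * 1024 * 1024, 60.0))
```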

    @@ -42582,44 +47780,20 @@

    node_nfs_secinfo_avg_latencyUnit: microsec
    Type: average
    Base: secinfo.total -

    - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodesecinfo.average_latency
    Unit: microsec
    Type: average
    Base: secinfo.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodesecinfo.average_latency
    Unit: microsec
    Type: average
    Base: secinfo.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodesecinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodesecinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/volumebytes_read
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4:nodesecinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volumeread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_secinfo_no_name_avg_latency

    -

    Average latency of SECINFO_NO_NAME operations.

    +

    node_volume_read_latency

    +

Average latency in microseconds for the WAFL filesystem to process read requests to the volume; not including request processing or network communication time. node_volume_read_latency is volume_read_latency aggregated by node.

    @@ -42632,32 +47806,20 @@

node_nfs_secinfo_no_name_avg_latency

    - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodesecinfo_no_name.average_latency
    Unit: microsec
    Type: average
    Base: secinfo_no_name.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodesecinfo_no_name.average_latency
    Unit: microsec
    Type: average
    Base: secinfo_no_name.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodesecinfo_no_name_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_no_name_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/volumeread_latency
    Unit: microsec
    Type: average
    Base: total_read_ops
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodesecinfo_no_name_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_no_name_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances volumeread_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_secinfo_no_name_total

    -

    Total number of SECINFO_NO_NAME operations.

    +

    node_volume_read_ops

    +

    Number of read operations per second from the volume. node_volume_read_ops is volume_read_ops aggregated by node.

    @@ -42670,32 +47832,20 @@

    node_nfs_secinfo_no_name_totalUnit: none
    Type: rate
    Base: -

    - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodesecinfo_no_name.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodesecinfo_no_name_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/volumetotal_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodesecinfo_no_name_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances volumeread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_secinfo_total

    -

    Total number of SECINFO operations.

    +

    node_volume_total_ops

    +

    Number of operations per second serviced by the volume. node_volume_total_ops is volume_total_ops aggregated by node.
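
Because node_volume_total_ops is already the per-node aggregation of volume_total_ops, it can be queried directly from whatever Prometheus instance scrapes these metrics. A hedged sketch using the standard Prometheus HTTP API follows; the metric name is taken from this page, while the Prometheus address and the presence of a node label are assumptions about your deployment.

```python
# Minimal sketch: read node_volume_total_ops from Prometheus via its HTTP API.
import requests

PROMETHEUS = "http://prometheus.example.com:9090"  # placeholder address

resp = requests.get(
    f"{PROMETHEUS}/api/v1/query",
    params={"query": "node_volume_total_ops"},
)
resp.raise_for_status()

for sample in resp.json()["data"]["result"]:
    labels = sample["metric"]
    timestamp, value = sample["value"]
    print(labels.get("node", "?"), value)
```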

    @@ -42708,44 +47858,46 @@

    node_nfs_secinfo_total

    - - - - - - - - - + + + - - - - + + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodesecinfo.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodesecinfo.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yamlapi/cluster/counter/tables/volumetotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodesecinfo.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yamlZAPIperf-object-get-instances volumetotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
    +

    node_volume_write_data

    +

    Bytes written per second. node_volume_write_data is volume_write_data aggregated by node.

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1:nodesecinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_2:nodesecinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlRESTapi/cluster/counter/tables/volumebytes_written
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4:nodesecinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances volumewrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_sequence_avg_latency

    -

    Average latency of SEQUENCE operations.

    +

    node_volume_write_latency

    +

Average latency in microseconds for the WAFL filesystem to process write requests to the volume; not including request processing or network communication time. node_volume_write_latency is volume_write_latency aggregated by node.

    @@ -42758,32 +47910,20 @@

    node_nfs_sequence_avg_latencyUnit: microsec
    Type: average
    Base: sequence.total -

    - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodesequence.average_latency
    Unit: microsec
    Type: average
    Base: sequence.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodesequence_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: sequence_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/volumewrite_latency
    Unit: microsec
    Type: average
    Base: total_write_ops
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodesequence_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: sequence_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances volumewrite_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_sequence_total

    -

    Total number of SEQUENCE operations.

    +

    node_volume_write_ops

    +

    Number of write operations per second to the volume. node_volume_write_ops is volume_write_ops aggregated by node.

    @@ -42796,32 +47936,20 @@

    node_nfs_sequence_total

    - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodesequence.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodesequence.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodesequence_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/volumetotal_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodesequence_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances volumewrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yaml
    -

    node_nfs_set_ssv_avg_latency

    -

    Average latency of SET_SSV operations.

    +

    nvme_lif_avg_latency

    +

    Average latency for NVMF operations

    @@ -42834,32 +47962,20 @@

    node_nfs_set_ssv_avg_latencyUnit: microsec
    Type: average
    Base: set_ssv.total -

    - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeset_ssv.average_latency
    Unit: microsec
    Type: average
    Base: set_ssv.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeset_ssv_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: set_ssv_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/nvmf_lifaverage_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/restperf/9.12.0/nvmf_lif.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeset_ssv_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: set_ssv_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances nvmf_fc_lifavg_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml
    -

    node_nfs_set_ssv_total

    -

    Total number of SET_SSV operations.

    +

    nvme_lif_avg_other_latency

    +

    Average latency for operations other than read, write, compare or compare-and-write.

    @@ -42872,32 +47988,20 @@

    node_nfs_set_ssv_total

    - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodeset_ssv.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeset_ssv.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeset_ssv_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/nvmf_lifaverage_other_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/restperf/9.12.0/nvmf_lif.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeset_ssv_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances nvmf_fc_lifavg_other_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml
    -

    node_nfs_setattr_avg_latency

    -

    Average latency of SetAttr procedure requests. The counter keeps track of the average response time of SetAttr requests.

    +

    nvme_lif_avg_read_latency

    +

    Average latency for read operations

    @@ -42910,56 +48014,20 @@

    node_nfs_setattr_avg_latencyUnit: microsec
    Type: average
    Base: setattr.total -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodesetattr.average_latency
    Unit: microsec
    Type: average
    Base: setattr.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodesetattr.average_latency
    Unit: microsec
    Type: average
    Base: setattr.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodesetattr.average_latency
    Unit: microsec
    Type: average
    Base: setattr.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodesetattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodesetattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodesetattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/nvmf_lifaverage_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/restperf/9.12.0/nvmf_lif.yaml
    ZAPIperf-object-get-instances nfsv4:nodesetattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nvmf_fc_lifavg_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml
    -

    node_nfs_setattr_total

    -

Total number of Setattr procedure requests. It is the total number of Setattr success and Setattr error requests.

    +

    nvme_lif_avg_write_latency

    +

    Average latency for write operations

    @@ -42972,56 +48040,20 @@

    node_nfs_setattr_total

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v3:nodesetattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodesetattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodesetattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodesetattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodesetattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodesetattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodesetattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/nvmf_lifaverage_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/restperf/9.12.0/nvmf_lif.yaml
    ZAPIperf-object-get-instances nfsv4:nodesetattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nvmf_fc_lifavg_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml
    -

    node_nfs_setclientid_avg_latency

    -

    Average latency of SETCLIENTID procedures

    +

    nvme_lif_other_ops

    +

    Number of operations that are not read, write, compare or compare-and-write.

    @@ -43034,20 +48066,20 @@

    node_nfs_setclientid_avg_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodesetclientid.average_latency
    Unit: microsec
    Type: average
    Base: setclientid.total
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/nvmf_lifother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nvmf_lif.yaml
    ZAPIperf-object-get-instances nfsv4:nodesetclientid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setclientid_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nvmf_fc_lifother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml
    -

    node_nfs_setclientid_confirm_avg_latency

    -

    Average latency of SETCLIENTID_CONFIRM procedures

    +

    nvme_lif_read_data

    +

    Amount of data read from the storage system

    @@ -43060,20 +48092,20 @@

node_nfs_setclientid_confirm_avg_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodesetclientid_confirm.average_latency
    Unit: microsec
    Type: average
    Base: setclientid_confirm.total
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/nvmf_lifread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nvmf_lif.yaml
    ZAPIperf-object-get-instances nfsv4:nodesetclientid_confirm_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setclientid_confirm_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nvmf_fc_lifread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml
    -

    node_nfs_setclientid_confirm_total

    -

    Total number of SETCLIENTID_CONFIRM procedures

    +

    nvme_lif_read_ops

    +

    Number of read operations

    @@ -43086,20 +48118,20 @@

    node_nfs_setclientid_confirm_total

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodesetclientid_confirm.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/nvmf_lifread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nvmf_lif.yaml
    ZAPIperf-object-get-instances nfsv4:nodesetclientid_confirm_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nvmf_fc_lifread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml
    -

    node_nfs_setclientid_total

    -

    Total number of SETCLIENTID procedures

    +

    nvme_lif_total_ops

    +

    Total number of operations.

    @@ -43112,20 +48144,20 @@

    node_nfs_setclientid_totalUnit: none
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/nvmf_liftotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nvmf_lif.yaml
    ZAPIperf-object-get-instances nfsv4:nodesetclientid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nvmf_fc_liftotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml
    - -

    Average latency of SymLink procedure requests. The counter keeps track of the average response time of SymLink requests.

    +

    nvme_lif_write_data

    +

    Amount of data written to the storage system

    @@ -43138,20 +48170,20 @@ + + + - - - + + +
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/nvmf_lifwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nvmf_lif.yaml
    ZAPIperf-object-get-instances nfsv3:nodesymlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: symlink_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances nvmf_fc_lifwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml
    - -

Total number of SymLink procedure requests. It is the total number of SymLink success and SymLink error requests.

    +

    nvme_lif_write_ops

    +

    Number of write operations

    @@ -43164,20 +48196,20 @@ - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v3:nodesymlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3_node.yamlapi/cluster/counter/tables/nvmf_lifwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nvmf_lif.yaml
    ZAPIperf-object-get-instances nfsv3:nodesymlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances nvmf_fc_lifwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml
    -

    node_nfs_test_stateid_avg_latency

    -

    Average latency of TEST_STATEID operations.

    +

    nvmf_rdma_port_avg_latency

    +

    Average latency for NVMF operations

    @@ -43190,32 +48222,20 @@

    node_nfs_test_stateid_avg_latency

    - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodetest_stateid.average_latency
    Unit: microsec
    Type: average
    Base: test_stateid.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodetest_stateid.average_latency
    Unit: microsec
    Type: average
    Base: test_stateid.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodetest_stateid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: test_stateid_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/nvmf_rdma_portaverage_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/restperf/9.14.1/nvmf_rdma_port.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodetest_stateid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: test_stateid_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances nvmf_rdma_portavg_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml
    -

    node_nfs_test_stateid_total

    -

    Total number of TEST_STATEID operations.

    +

    nvmf_rdma_port_avg_other_latency

    +

    Average latency for operations other than read, write, compare or compare-and-write

    @@ -43228,32 +48248,20 @@

    node_nfs_test_stateid_totalUnit: none
    Type: rate
    Base: -

    - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodetest_stateid.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodetest_stateid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/nvmf_rdma_portaverage_other_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/restperf/9.14.1/nvmf_rdma_port.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodetest_stateid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances nvmf_rdma_portavg_other_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml
    -

    node_nfs_throughput

    -

    Rate of NFSv3 data transfers per second.

    +

    nvmf_rdma_port_avg_read_latency

    +

    Average latency for read operations

    @@ -43266,56 +48274,20 @@

    node_nfs_throughputUnit: b_per_sec
    Type: rate
    Base: -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodetotal.throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodetotal.throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodetotal.throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodenfsv3_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodenfs41_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodenfs42_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/nvmf_rdma_portaverage_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/restperf/9.14.1/nvmf_rdma_port.yaml
    ZAPIperf-object-get-instances nfsv4:nodenfs4_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nvmf_rdma_portavg_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml
    -

    node_nfs_total_ops

    -

    Total number of NFSv3 procedure requests per second.

    +

    nvmf_rdma_port_avg_write_latency

    +

    Average latency for write operations

    @@ -43328,56 +48300,46 @@

    node_nfs_total_opsUnit: per_sec
    Type: rate
    Base: -

    - - - - - - - - - - - - - - - - - - + + + - - - + + + + +
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodetotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodetotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodetotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/nvmf_rdma_portaverage_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/restperf/9.14.1/nvmf_rdma_port.yaml
    ZAPIperf-object-get-instances nfsv3:nodenfsv3_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances nvmf_rdma_portavg_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml
    +

    nvmf_rdma_port_other_ops

    +

Number of operations that are not read, write, compare or compare-and-write.

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1:nodetotal_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_2:nodetotal_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlRESTapi/cluster/counter/tables/nvmf_rdma_portother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_rdma_port.yaml
    ZAPIperf-object-get-instances nfsv4:nodetotal_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nvmf_rdma_portother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml
    -

    node_nfs_verify_avg_latency

    -

    Average latency of VERIFY operations.

    +

    nvmf_rdma_port_read_data

    +

    Amount of data read from the storage system

    @@ -43390,44 +48352,20 @@

    node_nfs_verify_avg_latencyUnit: microsec
    Type: average
    Base: verify.total -

    - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeverify.average_latency
    Unit: microsec
    Type: average
    Base: verify.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeverify.average_latency
    Unit: microsec
    Type: average
    Base: verify.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: verify_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: verify_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/nvmf_rdma_portread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_rdma_port.yaml
    ZAPIperf-object-get-instances nfsv4:nodeverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: verify_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nvmf_rdma_portread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml
    -

    node_nfs_verify_total

    -

    Total number of VERIFY operations.

    +

    nvmf_rdma_port_read_ops

    +

    Number of read operations

    @@ -43440,44 +48378,20 @@

    node_nfs_verify_totalUnit: none
    Type: rate
    Base: -

    - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodeverify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodeverify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodeverify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodeverify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/nvmf_rdma_portread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_rdma_port.yaml
    ZAPIperf-object-get-instances nfsv4:nodeverify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nvmf_rdma_portread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml
    -

    node_nfs_want_delegation_avg_latency

    -

    Average latency of WANT_DELEGATION operations.

    +

    nvmf_rdma_port_total_data

    +

    Amount of NVMF traffic to and from the storage system

    @@ -43490,32 +48404,20 @@

node_nfs_want_delegation_avg_latency

    - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodewant_delegation.average_latency
    Unit: microsec
    Type: average
    Base: want_delegation.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodewant_delegation.average_latency
    Unit: microsec
    Type: average
    Base: want_delegation.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodewant_delegation_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: want_delegation_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/nvmf_rdma_porttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_rdma_port.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodewant_delegation_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: want_delegation_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances nvmf_rdma_porttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml
    -

    node_nfs_want_delegation_total

    -

    Total number of WANT_DELEGATION operations.

    +

    nvmf_rdma_port_total_ops

    +

    Total number of operations.

    @@ -43528,32 +48430,20 @@

    node_nfs_want_delegation_totalUnit: none
    Type: rate
    Base: -

    - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodewant_delegation.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodewant_delegation_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlapi/cluster/counter/tables/nvmf_rdma_porttotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_rdma_port.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodewant_delegation_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlperf-object-get-instances nvmf_rdma_porttotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml
    -

    node_nfs_write_avg_latency

    -

    Average latency of Write procedure requests. The counter keeps track of the average response time of Write requests.

    +

    nvmf_rdma_port_write_data

    +

    Amount of data written to the storage system

    @@ -43566,56 +48456,20 @@

    node_nfs_write_avg_latencyUnit: microsec
    Type: average
    Base: write.total -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodewrite.average_latency
    Unit: microsec
    Type: average
    Base: write.total
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodewrite.average_latency
    Unit: microsec
    Type: average
    Base: write.total
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodewrite.average_latency
    Unit: microsec
    Type: average
    Base: write.total
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodewrite_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodewrite_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodewrite_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/nvmf_rdma_portwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_rdma_port.yaml
    ZAPIperf-object-get-instances nfsv4:nodewrite_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nvmf_rdma_portwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml
    -

    node_nfs_write_ops

    -

    Total observed NFSv3 write operations per second.

    +

    nvmf_rdma_port_write_ops

    +

    Number of write operations

    @@ -43628,20 +48482,20 @@

    node_nfs_write_opsUnit: per_sec
    Type: rate
    Base: -

    + - - - + + +
    conf/restperf/9.12.0/nfsv3_node.yamlconf/restperf/9.14.1/nvmf_rdma_port.yaml
    ZAPIperf-object-get-instances nfsv3:nodenfsv3_write_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances nvmf_rdma_portwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml
    -

    node_nfs_write_throughput

    -

    Rate of NFSv3 write data transfers per second.

    +

    nvmf_tcp_port_avg_latency

    +

    Average latency for NVMF operations

    @@ -43654,56 +48508,20 @@

    node_nfs_write_throughputUnit: b_per_sec
    Type: rate
    Base: -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodetotal.write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodetotal.write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodetotal.write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yaml
    ZAPIperf-object-get-instances nfsv3:nodenfsv3_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml
    ZAPIperf-object-get-instances nfsv4_1:nodenfs41_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml
    ZAPIperf-object-get-instances nfsv4_2:nodenfs42_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlapi/cluster/counter/tables/nvmf_tcp_portaverage_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/restperf/9.14.1/nvmf_tcp_port.yaml
    ZAPIperf-object-get-instances nfsv4:nodenfs4_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nvmf_tcp_portavg_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml
    -

    node_nfs_write_total

    -

    Total number of Write procedure requests. It is the total number of write success and write error requests.

    +

    nvmf_tcp_port_avg_other_latency

    +

    Average latency for operations other than read, write, compare or compare-and-write

    @@ -43716,56 +48534,46 @@

    node_nfs_write_totalUnit: none
    Type: rate
    Base: -

    - - - - - - - - - - - - - - - - - - + + + - - - + + + + +
    conf/restperf/9.12.0/nfsv3_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41:nodewrite.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42:nodewrite.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2_node.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4:nodewrite.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_node.yamlapi/cluster/counter/tables/nvmf_tcp_portaverage_other_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/restperf/9.14.1/nvmf_tcp_port.yaml
    ZAPIperf-object-get-instances nfsv3:nodewrite_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3_node.yamlperf-object-get-instances nvmf_tcp_portavg_other_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml
    +

    nvmf_tcp_port_avg_read_latency

    +

    Average latency for read operations

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1:nodewrite_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_2:nodewrite_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yamlRESTapi/cluster/counter/tables/nvmf_tcp_portaverage_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/restperf/9.14.1/nvmf_tcp_port.yaml
    ZAPIperf-object-get-instances nfsv4:nodewrite_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_node.yamlperf-object-get-instances nvmf_tcp_portavg_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml
    -

    node_nvme_fc_data_recv

    -

    NVMe/FC kilobytes (KB) received per second

    +

    nvmf_tcp_port_avg_write_latency

    +

    Average latency for write operations

    @@ -43778,14 +48586,20 @@

    node_nvme_fc_data_recv

    - - - + + + + + + + + +
    RESTapi/cluster/counter/tables/system:nodenvme_fc_data_received
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/nvmf_tcp_portaverage_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/restperf/9.14.1/nvmf_tcp_port.yaml
    ZAPIperf-object-get-instances nvmf_tcp_portavg_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml
    -

    node_nvme_fc_data_sent

    -

    NVMe/FC kilobytes (KB) sent per second

    +

    nvmf_tcp_port_other_ops

    +

    Number of operations that are not read, write, compare or compare-and-write.

    @@ -43798,14 +48612,20 @@

    node_nvme_fc_data_sent

    - - - + + + + + + + + +
    RESTapi/cluster/counter/tables/system:nodenvme_fc_data_sent
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/nvmf_tcp_portother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_tcp_port.yaml
    ZAPIperf-object-get-instances nvmf_tcp_portother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml
    -

    node_nvme_fc_ops

    -

    NVMe/FC operations per second

    +

    nvmf_tcp_port_read_data

    +

    Amount of data read from the storage system

    @@ -43818,14 +48638,20 @@

    node_nvme_fc_opsUnit: per_sec
    Type: rate
    Base: -

    + + + + + + + + +
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/nvmf_tcp_portread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_tcp_port.yaml
    ZAPIperf-object-get-instances nvmf_tcp_portread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml
    -

    node_nvmf_data_recv

    -

    NVMe/FC kilobytes (KB) received per second.

    +

    nvmf_tcp_port_read_ops

    +

    Number of read operations

    @@ -43838,20 +48664,20 @@

    node_nvmf_data_recvUnit:
    Type:
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/nvmf_tcp_portread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_tcp_port.yaml
    ZAPIperf-object-get-instances system:nodenvmf_data_recv
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nvmf_tcp_portread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml
    -

    node_nvmf_data_sent

    -

    NVMe/FC kilobytes (KB) sent per second.

    +

    nvmf_tcp_port_total_data

    +

    Amount of NVMF traffic to and from the storage system

    @@ -43864,20 +48690,20 @@

    node_nvmf_data_sentUnit:
    Type:
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/nvmf_tcp_porttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_tcp_port.yaml
    ZAPIperf-object-get-instances system:nodenvmf_data_sent
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nvmf_tcp_porttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml
    -

    node_nvmf_ops

    -

    NVMe/FC operations per second.

    +

    nvmf_tcp_port_total_ops

    +

    Total number of operations.

    @@ -43890,20 +48716,20 @@

    node_nvmf_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/system:nodenvme_fc_ops, 1
    Unit:
    Type:
    Base:
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/nvmf_tcp_porttotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_tcp_port.yaml
    ZAPIperf-object-get-instances system:nodenvmf_ops
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nvmf_tcp_porttotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml
    -

    node_ssd_data_read

    -

    Number of SSD Disk kilobytes (KB) read per second

    +

    nvmf_tcp_port_write_data

    +

    Amount of data written to the storage system

    @@ -43916,20 +48742,20 @@

    node_ssd_data_readUnit: kb_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/nvmf_tcp_portwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_tcp_port.yaml
    ZAPIperf-object-get-instances system:nodessd_data_read
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nvmf_tcp_portwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml
    -

    node_ssd_data_written

    -

    Number of SSD Disk kilobytes (KB) written per second

    +

    nvmf_tcp_port_write_ops

    +

    Number of write operations

    @@ -43942,20 +48768,20 @@

    node_ssd_data_writtenUnit: kb_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/system_node.yamlapi/cluster/counter/tables/nvmf_tcp_portwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_tcp_port.yaml
    ZAPIperf-object-get-instances system:nodessd_data_written
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlperf-object-get-instances nvmf_tcp_portwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml
    -

    node_total_data

    -

    Total throughput in bytes

    +

    ontaps3_logical_used_size

    +

    Specifies the bucket logical used size up to this point. This field cannot be specified using a POST or PATCH method.
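
ontaps3_logical_used_size and the related ontaps3_size below are sourced from the REST endpoint api/protocols/s3/buckets. The sketch below fetches those two fields directly; the hostname, credentials, and field-selection syntax are assumptions, not a prescribed procedure.

```python
# Minimal sketch: list S3 buckets with size and logical_used_size from the
# endpoint referenced in the rows below. Credentials are placeholders.
import requests

CLUSTER = "https://cluster.example.com"
AUTH = ("admin", "password")

resp = requests.get(
    f"{CLUSTER}/api/protocols/s3/buckets",
    params={"fields": "size,logical_used_size"},
    auth=AUTH,
    verify=False,  # lab-only convenience
)
resp.raise_for_status()

for bucket in resp.json().get("records", []):
    print(bucket.get("name"), bucket.get("size"), bucket.get("logical_used_size"))
```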

    @@ -43968,20 +48794,13 @@

    node_total_dataUnit: b_per_sec
    Type: rate
    Base: -

    - - - - - - + + +
    conf/restperf/9.12.0/system_node.yaml
    ZAPIperf-object-get-instances system:nodetotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/system_node.yamlapi/protocols/s3/bucketslogical_used_sizeconf/rest/9.7.0/ontap_s3.yaml
    -

    node_total_latency

    -

    Average latency for all operations in the system in microseconds

    +

    ontaps3_object_count

    @@ -43994,20 +48813,14 @@

    - REST | node_total_latency | Unit: microsec | Type: average | Base: total_ops | conf/restperf/9.12.0/system_node.yaml
    - ZAPI | perf-object-get-instances system:node | total_latency | Unit: microsec | Type: average | Base: total_ops | conf/zapiperf/cdot/9.8.0/system_node.yaml
    + REST | api/private/cli/vserver/object-store-server/bucket | object_count | conf/rest/9.7.0/ontap_s3.yaml
    -

    node_total_ops

    -

    Total number of operations per second

    +

    ontaps3_size

    +

    Specifies the bucket size in bytes; ranges from 190MB to 62PB.

    @@ -44020,20 +48833,14 @@

    - REST | node_total_ops | Unit: per_sec | Type: rate | Base: | conf/restperf/9.12.0/system_node.yaml
    - ZAPI | perf-object-get-instances system:node | total_ops | Unit: per_sec | Type: rate | Base: | conf/zapiperf/cdot/9.8.0/system_node.yaml
    + REST | api/protocols/s3/buckets | size | conf/rest/9.7.0/ontap_s3.yaml
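    Since ontaps3_logical_used_size and ontaps3_size are both byte values reported from api/protocols/s3/buckets, a bucket fill percentage can be derived directly from them. A small illustrative Python helper, assuming you already have the two values; the helper name is ours, not part of the collector:

        def bucket_used_percent(logical_used_size: int, size: int) -> float:
            """Percentage of the configured bucket size that is logically used."""
            if size == 0:
                return 0.0
            return 100.0 * logical_used_size / size

        # Example: 250 GiB used out of a 1 TiB bucket.
        print(round(bucket_used_percent(250 * 2**30, 1 * 2**40), 1))  # 24.4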
    -

    node_uptime

    -

    The total time, in seconds, that the node has been up.

    +

    ontaps3_svm_abort_multipart_upload_failed

    +

    Number of failed Abort Multipart Upload operations.

    @@ -44046,20 +48853,20 @@

    node_uptime

    - REST | api/cluster/nodes | uptime | conf/rest/9.12.0/node.yaml
    + REST | api/cluster/counter/tables/object_store_server | abort_multipart_upload_failed | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
    - ZAPI | system-node-get-iter | node-details-info.node-uptime | conf/zapi/cdot/9.8.0/node.yaml
    + ZAPI | perf-object-get-instances object_store_server | abort_multipart_upload_failed | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_cifs_other_latency

    -

    Average time for the WAFL filesystem to process other CIFS operations to the volume; not including CIFS protocol request processing or network communication time which will also be included in client observed CIFS request latency

    +

    ontaps3_svm_abort_multipart_upload_failed_client_close

    +

    Number of times Abort Multipart Upload operation failed because client terminated connection while the operation was still pending on server.

    @@ -44072,20 +48879,20 @@

    - REST | node_vol_cifs_other_latency | Unit: microsec | Type: average | Base: cifs.other_ops | conf/restperf/9.12.0/volume_node.yaml
    + REST | api/cluster/counter/tables/object_store_server | abort_multipart_upload_failed_client_close | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
    - ZAPI | perf-object-get-instances volume:node | cifs_other_latency | Unit: microsec | Type: average | Base: cifs_other_ops | conf/zapiperf/cdot/9.8.0/volume_node.yaml
    + ZAPI | perf-object-get-instances object_store_server | abort_multipart_upload_failed_client_close | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_cifs_other_ops

    -

    Number of other CIFS operations per second to the volume

    +

    ontaps3_svm_abort_multipart_upload_latency

    +

    Average latency for Abort Multipart Upload operations.

    @@ -44098,20 +48905,20 @@

    node_vol_cifs_other_ops

    - REST | api/cluster/counter/tables/volume:node | cifs.other_ops | Unit: per_sec | Type: rate | Base: | conf/restperf/9.12.0/volume_node.yaml
    + REST | api/cluster/counter/tables/object_store_server | abort_multipart_upload_latency | Unit: microsec | Type: average | Base: abort_multipart_upload_total | conf/restperf/9.14.1/ontap_s3_svm.yaml
    - ZAPI | perf-object-get-instances volume:node | cifs_other_ops | Unit: per_sec | Type: rate | Base: | conf/zapiperf/cdot/9.8.0/volume_node.yaml
    + ZAPI | perf-object-get-instances object_store_server | abort_multipart_upload_latency | Unit: microsec | Type: average,no-zero-values | Base: abort_multipart_upload_latency_base | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
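    abort_multipart_upload_latency is an average-type counter: the raw counter accumulates microseconds and its listed Base counter (abort_multipart_upload_total over REST, abort_multipart_upload_latency_base over ZAPI) accumulates the matching operation count, so the published average is the ratio of the two deltas between polls. A minimal Python sketch of that calculation with made-up samples, not Harvest's actual code:

        def average_latency_us(prev_latency_sum: int, curr_latency_sum: int,
                               prev_base: int, curr_base: int) -> float:
            """Average microseconds per operation between two polls of an average-type counter."""
            ops = curr_base - prev_base
            if ops <= 0:
                return 0.0  # no operations in the interval, nothing to average
            return (curr_latency_sum - prev_latency_sum) / ops

        # Example: the latency sum grew by 4_500_000 us while the base grew by 1_500 operations.
        print(average_latency_us(10_000_000, 14_500_000, 2_000, 3_500))  # 3000.0 us/op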
    -

    node_vol_cifs_read_data

    -

    Bytes read per second via CIFS

    +

    ontaps3_svm_abort_multipart_upload_rate

    +

    Number of Abort Multipart Upload operations per second.

    @@ -44124,20 +48931,20 @@

    node_vol_cifs_read_data

    - REST | api/cluster/counter/tables/volume:node | cifs.read_data | Unit: b_per_sec | Type: rate | Base: | conf/restperf/9.12.0/volume_node.yaml
    + REST | api/cluster/counter/tables/object_store_server | abort_multipart_upload_rate | Unit: per_sec | Type: rate | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
    - ZAPI | perf-object-get-instances volume:node | cifs_read_data | Unit: b_per_sec | Type: rate | Base: | conf/zapiperf/cdot/9.8.0/volume_node.yaml
    + ZAPI | perf-object-get-instances object_store_server | abort_multipart_upload_rate | Unit: per_sec | Type: rate,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_cifs_read_latency

    -

    Average time for the WAFL filesystem to process CIFS read requests to the volume; not including CIFS protocol request processing or network communication time which will also be included in client observed CIFS request latency

    +

    ontaps3_svm_abort_multipart_upload_total

    +

    Number of Abort Multipart Upload operations.

    @@ -44150,20 +48957,20 @@

    - REST | node_vol_cifs_read_latency | Unit: microsec | Type: average | Base: cifs.read_ops | conf/restperf/9.12.0/volume_node.yaml
    + REST | api/cluster/counter/tables/object_store_server | abort_multipart_upload_total | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
    - ZAPI | perf-object-get-instances volume:node | cifs_read_latency | Unit: microsec | Type: average | Base: cifs_read_ops | conf/zapiperf/cdot/9.8.0/volume_node.yaml
    + ZAPI | perf-object-get-instances object_store_server | abort_multipart_upload_total | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_cifs_read_ops

    -

    Number of CIFS read operations per second from the volume

    +

    ontaps3_svm_allow_access

    +

    Number of times access was allowed.

    @@ -44176,20 +48983,20 @@

    node_vol_cifs_read_ops

    - REST | api/cluster/counter/tables/volume:node | cifs.read_ops | Unit: per_sec | Type: rate | Base: | conf/restperf/9.12.0/volume_node.yaml
    + REST | api/cluster/counter/tables/object_store_server | allow_access | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
    - ZAPI | perf-object-get-instances volume:node | cifs_read_ops | Unit: per_sec | Type: rate | Base: | conf/zapiperf/cdot/9.8.0/volume_node.yaml
    + ZAPI | perf-object-get-instances object_store_server | allow_access | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_cifs_write_data

    -

    Bytes written per second via CIFS

    +

    ontaps3_svm_anonymous_access

    +

    Number of times anonymous access was allowed.

    @@ -44202,20 +49009,20 @@

    node_vol_cifs_write_data

    - REST | api/cluster/counter/tables/volume:node | cifs.write_data | Unit: b_per_sec | Type: rate | Base: | conf/restperf/9.12.0/volume_node.yaml
    + REST | api/cluster/counter/tables/object_store_server | anonymous_access | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
    - ZAPI | perf-object-get-instances volume:node | cifs_write_data | Unit: b_per_sec | Type: rate | Base: | conf/zapiperf/cdot/9.8.0/volume_node.yaml
    + ZAPI | perf-object-get-instances object_store_server | anonymous_access | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_cifs_write_latency

    -

    Average time for the WAFL filesystem to process CIFS write requests to the volume; not including CIFS protocol request processing or network communication time which will also be included in client observed CIFS request latency

    +

    ontaps3_svm_anonymous_deny_access

    +

    Number of times anonymous access was denied.

    @@ -44228,20 +49035,20 @@

    - REST | node_vol_cifs_write_latency | Unit: microsec | Type: average | Base: cifs.write_ops | conf/restperf/9.12.0/volume_node.yaml
    + REST | api/cluster/counter/tables/object_store_server | anonymous_deny_access | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
    - ZAPI | perf-object-get-instances volume:node | cifs_write_latency | Unit: microsec | Type: average | Base: cifs_write_ops | conf/zapiperf/cdot/9.8.0/volume_node.yaml
    + ZAPI | perf-object-get-instances object_store_server | anonymous_deny_access | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_cifs_write_ops

    -

    Number of CIFS write operations per second to the volume

    +

    ontaps3_svm_authentication_failures

    +

    Number of authentication failures.

    @@ -44254,20 +49061,20 @@

    node_vol_cifs_write_ops

    - REST | api/cluster/counter/tables/volume:node | cifs.write_ops | Unit: per_sec | Type: rate | Base: | conf/restperf/9.12.0/volume_node.yaml
    + REST | api/cluster/counter/tables/object_store_server | authentication_failures | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
    - ZAPI | perf-object-get-instances volume:node | cifs_write_ops | Unit: per_sec | Type: rate | Base: | conf/zapiperf/cdot/9.8.0/volume_node.yaml
    + ZAPI | perf-object-get-instances object_store_server | authentication_failures | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_fcp_other_latency

    -

    Average time for the WAFL filesystem to process other FCP protocol operations to the volume; not including FCP protocol request processing or network communication time which will also be included in client observed FCP protocol request latency

    +

    ontaps3_svm_chunked_upload_reqs

    +

    Total number of object store server chunked object upload requests

    @@ -44280,20 +49087,20 @@

    node_vol_fcp_other_latencyUnit: microsec
    Type: average
    Base: fcp.other_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_serverchunked_upload_requests
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodefcp_other_latency
    Unit: microsec
    Type: average
    Base: fcp_other_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_serverchunked_upload_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_fcp_other_ops

    -

    Number of other block protocol operations per second to the volume

    +

    ontaps3_svm_complete_multipart_upload_failed

    +

    Number of failed Complete Multipart Upload operations.

    @@ -44306,20 +49113,20 @@

    node_vol_fcp_other_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volume:nodefcp.other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_servercomplete_multipart_upload_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodefcp_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_servercomplete_multipart_upload_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_fcp_read_data

    -

    Bytes read per second via block protocol

    +

    ontaps3_svm_complete_multipart_upload_failed_client_close

    +

    Number of times Complete Multipart Upload operation failed because client terminated connection while the operation was still pending on server.

    @@ -44332,20 +49139,20 @@

    node_vol_fcp_read_data

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volume:nodefcp.read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_servercomplete_multipart_upload_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodefcp_read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_servercomplete_multipart_upload_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_fcp_read_latency

    -

    Average time for the WAFL filesystem to process FCP protocol read operations to the volume; not including FCP protocol request processing or network communication time which will also be included in client observed FCP protocol request latency

    +

    ontaps3_svm_complete_multipart_upload_latency

    +

    Average latency for Complete Multipart Upload operations.

    @@ -44358,20 +49165,20 @@

    node_vol_fcp_read_latencyUnit: microsec
    Type: average
    Base: fcp.read_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_servercomplete_multipart_upload_latency
    Unit: microsec
    Type: average
    Base: complete_multipart_upload_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodefcp_read_latency
    Unit: microsec
    Type: average
    Base: fcp_read_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_servercomplete_multipart_upload_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: complete_multipart_upload_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_fcp_read_ops

    -

    Number of block protocol read operations per second from the volume

    +

    ontaps3_svm_complete_multipart_upload_rate

    +

    Number of Complete Multipart Upload operations per second.

    @@ -44384,20 +49191,20 @@

    node_vol_fcp_read_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_servercomplete_multipart_upload_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodefcp_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_servercomplete_multipart_upload_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_fcp_write_data

    -

    Bytes written per second via block protocol

    +

    ontaps3_svm_complete_multipart_upload_total

    +

    Number of Complete Multipart Upload operations.

    @@ -44410,20 +49217,20 @@

    node_vol_fcp_write_data

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volume:nodefcp.write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_servercomplete_multipart_upload_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodefcp_write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_servercomplete_multipart_upload_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_fcp_write_latency

    -

    Average time for the WAFL filesystem to process FCP protocol write operations to the volume; not including FCP protocol request processing or network communication time which will also be included in client observed FCP protocol request latency

    +

    ontaps3_svm_connected_connections

    +

    Number of object store server connections currently established

    @@ -44436,20 +49243,20 @@

    - REST | node_vol_fcp_write_latency | Unit: microsec | Type: average | Base: fcp.write_ops | conf/restperf/9.12.0/volume_node.yaml
    + REST | api/cluster/counter/tables/object_store_server | connected_connections | Unit: none | Type: raw | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
    - ZAPI | perf-object-get-instances volume:node | fcp_write_latency | Unit: microsec | Type: average | Base: fcp_write_ops | conf/zapiperf/cdot/9.8.0/volume_node.yaml
    + ZAPI | perf-object-get-instances object_store_server | connected_connections | Unit: none | Type: raw,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
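    connected_connections is a raw-type counter, while most of its neighbours are delta, rate, or average types. A rough Python sketch of how one pair of samples could be reduced for each of the four Type values used in these tables (our own simplification, not the collector's implementation):

        def reduce_counter(counter_type: str, prev: float, curr: float,
                           elapsed_s: float = 1.0, prev_base: float = 0.0, curr_base: float = 0.0) -> float:
            """Reduce two consecutive samples according to the counter type used in these tables."""
            if counter_type == "raw":
                return curr                          # gauge: publish the latest sample
            if counter_type == "delta":
                return curr - prev                   # change since the previous poll
            if counter_type == "rate":
                return (curr - prev) / elapsed_s     # change per second
            if counter_type == "average":
                base_delta = curr_base - prev_base
                return (curr - prev) / base_delta if base_delta else 0.0
            raise ValueError(f"unknown counter type: {counter_type}")

        # Example: a delta-type counter (e.g. connections) that grew from 120 to 180 between polls.
        print(reduce_counter("delta", 120, 180))  # 60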
    -

    node_vol_fcp_write_ops

    -

    Number of block protocol write operations per second to the volume

    +

    ontaps3_svm_connections

    +

    Total number of object store server connections.

    @@ -44462,20 +49269,20 @@

    node_vol_fcp_write_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volume:nodefcp.write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_serverconnections
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodefcp_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_serverconnections
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_iscsi_other_latency

    -

    Average time for the WAFL filesystem to process other iSCSI protocol operations to the volume; not including iSCSI protocol request processing or network communication time which will also be included in client observed iSCSI protocol request latency

    +

    ontaps3_svm_create_bucket_failed

    +

    Number of failed Create Bucket operations.

    @@ -44488,20 +49295,20 @@

    node_vol_iscsi_other_latencyUnit: microsec
    Type: average
    Base: iscsi.other_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_servercreate_bucket_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodeiscsi_other_latency
    Unit: microsec
    Type: average
    Base: iscsi_other_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_servercreate_bucket_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_iscsi_other_ops

    -

    Number of other block protocol operations per second to the volume

    +

    ontaps3_svm_create_bucket_failed_client_close

    +

    Number of times Create Bucket operation failed because client terminated connection while the operation was still pending on server.

    @@ -44514,20 +49321,20 @@

    node_vol_iscsi_other_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volume:nodeiscsi.other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_servercreate_bucket_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodeiscsi_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_servercreate_bucket_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_iscsi_read_data

    -

    Bytes read per second via block protocol

    +

    ontaps3_svm_create_bucket_latency

    +

    Average latency for Create Bucket operations.

    @@ -44540,20 +49347,20 @@

    node_vol_iscsi_read_data

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volume:nodeiscsi.read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_servercreate_bucket_latency
    Unit: microsec
    Type: average
    Base: create_bucket_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodeiscsi_read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_servercreate_bucket_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_bucket_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_iscsi_read_latency

    -

    Average time for the WAFL filesystem to process iSCSI protocol read operations to the volume; not including iSCSI protocol request processing or network communication time which will also be included in client observed iSCSI protocol request latency

    +

    ontaps3_svm_create_bucket_rate

    +

    Number of Create Bucket operations per second.

    @@ -44566,20 +49373,20 @@

    node_vol_iscsi_read_latencyUnit: microsec
    Type: average
    Base: iscsi.read_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_servercreate_bucket_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodeiscsi_read_latency
    Unit: microsec
    Type: average
    Base: iscsi_read_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_servercreate_bucket_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_iscsi_read_ops

    -

    Number of block protocol read operations per second from the volume

    +

    ontaps3_svm_create_bucket_total

    +

    Number of Create Bucket operations.

    @@ -44592,20 +49399,20 @@

    node_vol_iscsi_read_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volume:nodeiscsi.read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_servercreate_bucket_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodeiscsi_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_servercreate_bucket_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_iscsi_write_data

    -

    Bytes written per second via block protocol

    +

    ontaps3_svm_default_deny_access

    +

    Number of times access was denied by default and not through any policy statement.

    @@ -44618,20 +49425,20 @@

    - REST | node_vol_iscsi_write_data | Unit: b_per_sec | Type: rate | Base: | conf/restperf/9.12.0/volume_node.yaml
    + REST | api/cluster/counter/tables/object_store_server | default_deny_access | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
    - ZAPI | perf-object-get-instances volume:node | iscsi_write_data | Unit: b_per_sec | Type: rate | Base: | conf/zapiperf/cdot/9.8.0/volume_node.yaml
    + ZAPI | perf-object-get-instances object_store_server | default_deny_access | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
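    The access counters in this group (allow_access, anonymous_access, anonymous_deny_access, default_deny_access, and explicit_deny_access further below) are all delta counts of authorization decisions, so a per-interval denial ratio can be built from them. An illustrative Python helper; the grouping and argument names are our own:

        def deny_ratio(allowed: int, denied_by_default: int, denied_explicitly: int, denied_anonymous: int) -> float:
            """Fraction of access decisions in an interval that were denials."""
            denied = denied_by_default + denied_explicitly + denied_anonymous
            total = allowed + denied
            return denied / total if total else 0.0

        # Example interval: 940 allowed, 42 default denies, 15 explicit denies, 3 anonymous denies.
        print(round(deny_ratio(940, 42, 15, 3), 3))  # 0.06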
    -

    node_vol_iscsi_write_latency

    -

    Average time for the WAFL filesystem to process iSCSI protocol write operations to the volume; not including iSCSI protocol request processing or network communication time which will also be included in client observed iSCSI request latency

    +

    ontaps3_svm_delete_bucket_failed

    +

    Number of failed Delete Bucket operations.

    @@ -44644,20 +49451,20 @@

    node_vol_iscsi_write_latencyUnit: microsec
    Type: average
    Base: iscsi.write_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_serverdelete_bucket_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodeiscsi_write_latency
    Unit: microsec
    Type: average
    Base: iscsi_write_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_serverdelete_bucket_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_iscsi_write_ops

    -

    Number of block protocol write operations per second to the volume

    +

    ontaps3_svm_delete_bucket_failed_client_close

    +

    Number of times Delete Bucket operation failed because client terminated connection while the operation was still pending on server.

    @@ -44670,20 +49477,20 @@

    node_vol_iscsi_write_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volume:nodeiscsi.write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_serverdelete_bucket_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodeiscsi_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_serverdelete_bucket_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_nfs_other_latency

    -

    Average time for the WAFL filesystem to process other NFS operations to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency

    +

    ontaps3_svm_delete_bucket_latency

    +

    Average latency for Delete Bucket operations.

    @@ -44696,20 +49503,20 @@

    node_vol_nfs_other_latencyUnit: microsec
    Type: average
    Base: nfs.other_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_serverdelete_bucket_latency
    Unit: microsec
    Type: average
    Base: delete_bucket_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodenfs_other_latency
    Unit: microsec
    Type: average
    Base: nfs_other_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_serverdelete_bucket_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delete_bucket_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_nfs_other_ops

    -

    Number of other NFS operations per second to the volume

    +

    ontaps3_svm_delete_bucket_rate

    +

    Number of Delete Bucket operations per second.

    @@ -44722,20 +49529,20 @@

    node_vol_nfs_other_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volume:nodenfs.other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_serverdelete_bucket_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodenfs_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_serverdelete_bucket_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_nfs_read_data

    -

    Bytes read per second via NFS

    +

    ontaps3_svm_delete_bucket_total

    +

    Number of Delete Bucket operations.

    @@ -44748,20 +49555,20 @@

    node_vol_nfs_read_data

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volume:nodenfs.read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_serverdelete_bucket_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodenfs_read_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_serverdelete_bucket_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_nfs_read_latency

    -

    Average time for the WAFL filesystem to process NFS protocol read requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency

    +

    ontaps3_svm_delete_object_failed

    +

    Number of failed DELETE object operations

    @@ -44774,20 +49581,20 @@

    node_vol_nfs_read_latencyUnit: microsec
    Type: average
    Base: nfs.read_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_serverdelete_object_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodenfs_read_latency
    Unit: microsec
    Type: average
    Base: nfs_read_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_serverdelete_object_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_nfs_read_ops

    -

    Number of NFS read operations per second from the volume

    +

    ontaps3_svm_delete_object_failed_client_close

    +

    Number of times a DELETE object operation failed because the client closed the connection while the operation was still pending on the server.

    @@ -44800,20 +49607,20 @@

    node_vol_nfs_read_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_serverdelete_object_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodenfs_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_serverdelete_object_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_nfs_write_data

    -

    Bytes written per second via NFS

    +

    ontaps3_svm_delete_object_latency

    +

    Average latency for DELETE object operations

    @@ -44826,20 +49633,20 @@

    node_vol_nfs_write_data

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volume:nodenfs.write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_serverdelete_object_latency
    Unit: microsec
    Type: average
    Base: delete_object_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodenfs_write_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_serverdelete_object_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delete_object_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_nfs_write_latency

    -

    Average time for the WAFL filesystem to process NFS protocol write requests to the volume; not including NFS protocol request processing or network communication time, which will also be included in client observed NFS request latency

    +

    ontaps3_svm_delete_object_rate

    +

    Number of DELETE object operations per second

    @@ -44852,20 +49659,20 @@

    node_vol_nfs_write_latencyUnit: microsec
    Type: average
    Base: nfs.write_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_serverdelete_object_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodenfs_write_latency
    Unit: microsec
    Type: average
    Base: nfs_write_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_serverdelete_object_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_nfs_write_ops

    -

    Number of NFS write operations per second to the volume

    +

    ontaps3_svm_delete_object_tagging_failed

    +

    Number of failed DELETE object tagging operations.

    @@ -44878,20 +49685,20 @@

    node_vol_nfs_write_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volume:nodenfs.write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_serverdelete_object_tagging_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodenfs_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_serverdelete_object_tagging_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_read_latency

    -

    Average latency in microseconds for the WAFL filesystem to process read request to the volume; not including request processing or network communication time

    +

    ontaps3_svm_delete_object_tagging_failed_client_close

    +

    Number of times DELETE object tagging operation failed because client terminated connection while the operation was still pending on server.

    @@ -44904,20 +49711,20 @@

    node_vol_read_latencyUnit: microsec
    Type: average
    Base: total_read_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_serverdelete_object_tagging_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:noderead_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_serverdelete_object_tagging_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_vol_write_latency

    -

    Average latency in microseconds for the WAFL filesystem to process write request to the volume; not including request processing or network communication time

    +

    ontaps3_svm_delete_object_tagging_latency

    +

    Average latency for DELETE object tagging operations.

    @@ -44930,20 +49737,20 @@

    node_vol_write_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volume:nodewrite_latency
    Unit: microsec
    Type: average
    Base: total_write_ops
    conf/restperf/9.12.0/volume_node.yamlapi/cluster/counter/tables/object_store_serverdelete_object_tagging_latency
    Unit: microsec
    Type: average
    Base: delete_object_tagging_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volume:nodewrite_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/volume_node.yamlperf-object-get-instances object_store_serverdelete_object_tagging_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delete_object_tagging_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_avg_latency

    -

    Average latency in microseconds for the WAFL filesystem to process all the operations on the volume; not including request processing or network communication time. node_volume_avg_latency is volume_avg_latency aggregated by node.

    +

    ontaps3_svm_delete_object_tagging_rate

    +

    Number of DELETE object tagging operations per second.

    @@ -44956,20 +49763,20 @@

    node_volume_avg_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volumeaverage_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverdelete_object_tagging_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumeavg_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverdelete_object_tagging_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_access_latency

    -

    Average time for the WAFL filesystem to process NFS protocol access requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_access_latency is volume_nfs_access_latency aggregated by node.

    +

    ontaps3_svm_delete_object_tagging_total

    +

    Number of DELETE object tagging operations.

    @@ -44982,20 +49789,20 @@

    - REST | node_volume_nfs_access_latency | Unit: microsec | Type: average | Base: nfs.access_ops | conf/restperf/9.12.0/volume.yaml
    + REST | api/cluster/counter/tables/object_store_server | delete_object_tagging_total | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
    - ZAPI | perf-object-get-instances volume | nfs_access_latency | Unit: microsec | Type: average | Base: nfs_access_ops | conf/zapiperf/cdot/9.8.0/volume.yaml
    + ZAPI | perf-object-get-instances object_store_server | delete_object_tagging_total | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
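    Most object-store operations in this section come as a _failed / _total pair of delta counters (here delete_object_tagging_failed and delete_object_tagging_total), which makes a per-interval failure percentage straightforward. A small Python sketch; the helper is ours, not part of the templates:

        def failure_percent(failed_delta: int, total_delta: int) -> float:
            """Percentage of operations in a poll interval that failed."""
            if total_delta == 0:
                return 0.0
            return 100.0 * failed_delta / total_delta

        # Example interval: 7 failed DELETE object tagging operations out of 350 total.
        print(failure_percent(7, 350))  # 2.0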
    -

    node_volume_nfs_access_ops

    -

    Number of NFS accesses per second to the volume. node_volume_nfs_access_ops is volume_nfs_access_ops aggregated by node.

    +

    ontaps3_svm_delete_object_total

    +

    Number of DELETE object operations

    @@ -45008,20 +49815,20 @@

    node_volume_nfs_access_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverdelete_object_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumenfs_access_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverdelete_object_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_getattr_latency

    -

    Average time for the WAFL filesystem to process NFS protocol getattr requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_getattr_latency is volume_nfs_getattr_latency aggregated by node.

    +

    ontaps3_svm_explicit_deny_access

    +

    Number of times access was denied explicitly by a policy statement.

    @@ -45034,20 +49841,20 @@

    node_volume_nfs_getattr_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volumenfs.getattr_latency
    Unit: microsec
    Type: average
    Base: nfs.getattr_ops
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverexplicit_deny_access
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumenfs_getattr_latency
    Unit: microsec
    Type: average
    Base: nfs_getattr_ops
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverexplicit_deny_access
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_getattr_ops

    -

    Number of NFS getattr per second to the volume. node_volume_nfs_getattr_ops is volume_nfs_getattr_ops aggregated by node.

    +

    ontaps3_svm_get_bucket_acl_failed

    +

    Number of failed GET Bucket ACL operations

    @@ -45060,20 +49867,20 @@

    node_volume_nfs_getattr_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverget_bucket_acl_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumenfs_getattr_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverget_bucket_acl_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_lookup_latency

    -

    Average time for the WAFL filesystem to process NFS protocol lookup requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_lookup_latency is volume_nfs_lookup_latency aggregated by node.

    +

    ontaps3_svm_get_bucket_acl_total

    +

    Number of GET Bucket ACL operations

    @@ -45086,20 +49893,20 @@

    node_volume_nfs_lookup_latencyUnit: microsec
    Type: average
    Base: nfs.lookup_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverget_bucket_acl_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumenfs_lookup_latency
    Unit: microsec
    Type: average
    Base: nfs_lookup_ops
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverget_bucket_acl_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_lookup_ops

    -

    Number of NFS lookups per second to the volume. node_volume_nfs_lookup_ops is volume_nfs_lookup_ops aggregated by node.

    +

    ontaps3_svm_get_bucket_versioning_failed

    +

    Number of failed Get Bucket Versioning operations

    @@ -45112,20 +49919,20 @@

    node_volume_nfs_lookup_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverget_bucket_versioning_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumenfs_lookup_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverget_bucket_versioning_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_other_latency

    -

    Average time for the WAFL filesystem to process other NFS operations to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_other_latency is volume_nfs_other_latency aggregated by node.

    +

    ontaps3_svm_get_bucket_versioning_total

    +

    Number of Get Bucket Versioning operations.

    @@ -45138,20 +49945,20 @@

    node_volume_nfs_other_latencyUnit: microsec
    Type: average
    Base: nfs.other_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverget_bucket_versioning_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumenfs_other_latency
    Unit: microsec
    Type: average
    Base: nfs_other_ops
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverget_bucket_versioning_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_other_ops

    -

    Number of other NFS operations per second to the volume. node_volume_nfs_other_ops is volume_nfs_other_ops aggregated by node.

    +

    ontaps3_svm_get_data

    +

    Rate of GET object data transfers per second

    @@ -45164,20 +49971,20 @@

    node_volume_nfs_other_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverget_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumenfs_other_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverget_data
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_punch_hole_latency

    -

    Average time for the WAFL filesystem to process NFS protocol hole-punch requests to the volume. node_volume_nfs_punch_hole_latency is volume_nfs_punch_hole_latency aggregated by node.

    +

    ontaps3_svm_get_object_acl_failed

    +

    Number of failed GET Object ACL operations

    @@ -45190,20 +49997,20 @@

    node_volume_nfs_punch_hole_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volumenfs.punch_hole_latency
    Unit: microsec
    Type: average
    Base: nfs.punch_hole_ops
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverget_object_acl_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumenfs_punch_hole_latency
    Unit: microsec
    Type: average
    Base: nfs_punch_hole_ops
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverget_object_acl_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_punch_hole_ops

    -

    Number of NFS hole-punch requests per second to the volume. node_volume_nfs_punch_hole_ops is volume_nfs_punch_hole_ops aggregated by node.

    +

    ontaps3_svm_get_object_acl_total

    +

    Number of GET Object ACL operations

    @@ -45216,20 +50023,20 @@

    node_volume_nfs_punch_hole_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverget_object_acl_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumenfs_punch_hole_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverget_object_acl_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_read_latency

    -

    Average time for the WAFL filesystem to process NFS protocol read requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_read_latency is volume_nfs_read_latency aggregated by node.

    +

    ontaps3_svm_get_object_failed

    +

    Number of failed GET object operations

    @@ -45242,20 +50049,20 @@

    node_volume_nfs_read_latencyUnit: microsec
    Type: average
    Base: nfs.read_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverget_object_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumenfs_read_latency
    Unit: microsec
    Type: average
    Base: nfs_read_ops
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverget_object_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_read_ops

    -

    Number of NFS read operations per second from the volume. node_volume_nfs_read_ops is volume_nfs_read_ops aggregated by node.

    +

    ontaps3_svm_get_object_failed_client_close

    +

    Number of times a GET object operation failed because the client closed the connection while the operation was still pending on the server.

    @@ -45268,20 +50075,20 @@

    node_volume_nfs_read_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volumenfs.read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverget_object_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumenfs_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverget_object_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_setattr_latency

    -

    Average time for the WAFL filesystem to process NFS protocol setattr requests to the volume. node_volume_nfs_setattr_latency is volume_nfs_setattr_latency aggregated by node.

    +

    ontaps3_svm_get_object_lastbyte_latency

    +

    Average last-byte latency for GET object operations

    @@ -45294,20 +50101,20 @@

    node_volume_nfs_setattr_latency

    - REST | api/cluster/counter/tables/volume | nfs.setattr_latency | Unit: microsec | Type: average | Base: nfs.setattr_ops | conf/restperf/9.12.0/volume.yaml
    + REST | api/cluster/counter/tables/object_store_server | get_object_lastbyte_latency | Unit: microsec | Type: average | Base: get_object_total | conf/restperf/9.14.1/ontap_s3_svm.yaml
    - ZAPI | perf-object-get-instances volume | nfs_setattr_latency | Unit: microsec | Type: average | Base: nfs_setattr_ops | conf/zapiperf/cdot/9.8.0/volume.yaml
    + ZAPI | perf-object-get-instances object_store_server | get_object_lastbyte_latency | Unit: microsec | Type: average,no-zero-values | Base: get_object_lastbyte_latency_base | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_setattr_ops

    -

    Number of NFS setattr requests per second to the volume. node_volume_nfs_setattr_ops is volume_nfs_setattr_ops aggregated by node.

    +

    ontaps3_svm_get_object_latency

    +

    Average first-byte latency for GET object operations

    @@ -45320,20 +50127,20 @@

    - REST | node_volume_nfs_setattr_ops | Unit: per_sec | Type: rate | Base: | conf/restperf/9.12.0/volume.yaml
    + REST | api/cluster/counter/tables/object_store_server | get_object_latency | Unit: microsec | Type: average | Base: get_object_total | conf/restperf/9.14.1/ontap_s3_svm.yaml
    - ZAPI | perf-object-get-instances volume | nfs_setattr_ops | Unit: per_sec | Type: rate | Base: | conf/zapiperf/cdot/9.8.0/volume.yaml
    + ZAPI | perf-object-get-instances object_store_server | get_object_latency | Unit: microsec | Type: average,no-zero-values | Base: get_object_latency_base | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
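    get_object_latency is the average first-byte latency and get_object_lastbyte_latency (above) the average last-byte latency for GET object operations, both averaged over get_object_total; their difference approximates the average data-transfer portion of a GET. A hedged Python illustration with sample values:

        def transfer_time_us(first_byte_latency_us: float, last_byte_latency_us: float) -> float:
            """Approximate average time spent streaming object data after the first byte."""
            return max(0.0, float(last_byte_latency_us - first_byte_latency_us))

        # Example: 1.8 ms to first byte, 9.5 ms to last byte -> roughly 7.7 ms spent transferring data.
        print(transfer_time_us(1_800, 9_500))  # 7700.0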
    -

    node_volume_nfs_total_ops

    -

    Number of total NFS operations per second to the volume. node_volume_nfs_total_ops is volume_nfs_total_ops aggregated by node.

    +

    ontaps3_svm_get_object_rate

    +

    Number of GET object operations per second

    @@ -45346,20 +50153,20 @@

    node_volume_nfs_total_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverget_object_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumenfs_total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverget_object_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_write_latency

    -

    Average time for the WAFL filesystem to process NFS protocol write requests to the volume; not including NFS protocol request processing or network communication time, which will also be included in client observed NFS request latency. node_volume_nfs_write_latency is volume_nfs_write_latency aggregated by node.

    +

    ontaps3_svm_get_object_tagging_failed

    +

    Number of failed GET object tagging operations

    @@ -45372,20 +50179,20 @@

    node_volume_nfs_write_latencyUnit: microsec
    Type: average
    Base: nfs.write_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverget_object_tagging_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumenfs_write_latency
    Unit: microsec
    Type: average
    Base: nfs_write_ops
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverget_object_tagging_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_nfs_write_ops

    -

    Number of NFS write operations per second to the volume. node_volume_nfs_write_ops is volume_nfs_write_ops aggregated by node.

    +

    ontaps3_svm_get_object_tagging_failed_client_close

    +

    Number of times a GET object tagging operation failed because the client closed the connection while the operation was still pending on the server.

    @@ -45398,20 +50205,20 @@

    node_volume_nfs_write_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverget_object_tagging_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumenfs_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverget_object_tagging_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_other_latency

    -

    Average latency in microseconds for the WAFL filesystem to process other operations to the volume; not including request processing or network communication time. node_volume_other_latency is volume_other_latency aggregated by node.

    +

    ontaps3_svm_get_object_tagging_latency

    +

    Average latency for GET object tagging operations

    @@ -45424,20 +50231,20 @@

    node_volume_other_latencyUnit: microsec
    Type: average
    Base: total_other_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverget_object_tagging_latency
    Unit: microsec
    Type: average
    Base: get_object_tagging_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumeother_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverget_object_tagging_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: get_object_tagging_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_other_ops

    -

    Number of other operations per second to the volume. node_volume_other_ops is volume_other_ops aggregated by node.

    +

    ontaps3_svm_get_object_tagging_rate

    +

    Number of GET object tagging operations per second

    @@ -45450,20 +50257,20 @@

    node_volume_other_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverget_object_tagging_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumeother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverget_object_tagging_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_read_data

    -

    Bytes read per second. node_volume_read_data is volume_read_data aggregated by node.

    +

    ontaps3_svm_get_object_tagging_total

    +

    Number of GET object tagging operations

@@ -45476,20 +50283,20 @@

- node_volume_read_data | Unit: b_per_sec | Type: rate | Base: | conf/restperf/9.12.0/volume.yaml
+ REST | api/cluster/counter/tables/object_store_server | get_object_tagging_total | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
- ZAPI | perf-object-get-instances volume | read_data | Unit: b_per_sec | Type: rate | Base: | conf/zapiperf/cdot/9.8.0/volume.yaml
+ ZAPI | perf-object-get-instances object_store_server | get_object_tagging_total | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_read_latency

    -

    Average latency in microseconds for the WAFL filesystem to process read request to the volume; not including request processing or network communication time. node_volume_read_latency is volume_read_latency aggregated by node.

    +

    ontaps3_svm_get_object_total

    +

    Number of GET object operations

@@ -45502,20 +50309,20 @@

node_volume_read_latency

- REST | api/cluster/counter/tables/volume | read_latency | Unit: microsec | Type: average | Base: total_read_ops | conf/restperf/9.12.0/volume.yaml
+ REST | api/cluster/counter/tables/object_store_server | get_object_total | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
- ZAPI | perf-object-get-instances volume | read_latency | Unit: microsec | Type: average | Base: read_ops | conf/zapiperf/cdot/9.8.0/volume.yaml
+ ZAPI | perf-object-get-instances object_store_server | get_object_total | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_read_ops

    -

    Number of read operations per second from the volume. node_volume_read_ops is volume_read_ops aggregated by node.

    +

    ontaps3_svm_group_policy_evaluated

    +

    Number of times group policies were evaluated.

    @@ -45528,20 +50335,20 @@

    node_volume_read_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_servergroup_policy_evaluated
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumeread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_servergroup_policy_evaluated
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_total_ops

    -

    Number of operations per second serviced by the volume. node_volume_total_ops is volume_total_ops aggregated by node.

    +

    ontaps3_svm_head_bucket_failed

    +

    Number of failed HEAD bucket operations

    @@ -45554,20 +50361,20 @@

    node_volume_total_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverhead_bucket_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumetotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverhead_bucket_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_write_data

    -

    Bytes written per second. node_volume_write_data is volume_write_data aggregated by node.

    +

    ontaps3_svm_head_bucket_failed_client_close

    +

    Number of times HEAD bucket operation failed due to the case where client closed the connection while the operation was still pending on server.

    @@ -45580,20 +50387,20 @@

    node_volume_write_data

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/volumebytes_written
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverhead_bucket_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumewrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverhead_bucket_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    node_volume_write_latency

    -

    Average latency in microseconds for the WAFL filesystem to process write request to the volume; not including request processing or network communication time. node_volume_write_latency is volume_write_latency aggregated by node.

    +

    ontaps3_svm_head_bucket_latency

    +

    Average latency for HEAD bucket operations

@@ -45606,20 +50413,20 @@

- node_volume_write_latency | Unit: microsec | Type: average | Base: total_write_ops | conf/restperf/9.12.0/volume.yaml
+ REST | api/cluster/counter/tables/object_store_server | head_bucket_latency | Unit: microsec | Type: average | Base: head_bucket_total | conf/restperf/9.14.1/ontap_s3_svm.yaml
- ZAPI | perf-object-get-instances volume | write_latency | Unit: microsec | Type: average | Base: write_ops | conf/zapiperf/cdot/9.8.0/volume.yaml
+ ZAPI | perf-object-get-instances object_store_server | head_bucket_latency | Unit: microsec | Type: average,no-zero-values | Base: head_bucket_latency_base | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
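All of the ontaps3_svm rows point at the same REST counter table, api/cluster/counter/tables/object_store_server. A rough sketch of reading that table directly is shown below; the /rows suffix, the fields=counters parameter, the JSON shape, and the credentials are assumptions made for illustration, not a documented recipe, so check the ONTAP REST reference for your release.

```python
# Rough sketch: pull the object_store_server counter table over REST and print the
# HEAD bucket latency counter together with its base counter.
import requests

CLUSTER = "https://cluster.example.com"          # hypothetical cluster address
TABLE = "api/cluster/counter/tables/object_store_server"

resp = requests.get(
    f"{CLUSTER}/{TABLE}/rows",
    params={"fields": "counters"},               # assumed: ask for counter values
    auth=("admin", "password"),                  # placeholder credentials
    verify=False,                                # lab-only: skip TLS verification
)
resp.raise_for_status()
for row in resp.json().get("records", []):
    counters = {c["name"]: c.get("value") for c in row.get("counters", [])}
    print(row.get("id"),
          counters.get("head_bucket_latency"),
          counters.get("head_bucket_total"))
```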
    -

    node_volume_write_ops

    -

    Number of write operations per second to the volume. node_volume_write_ops is volume_write_ops aggregated by node.

    +

    ontaps3_svm_head_bucket_rate

    +

    Number of HEAD bucket operations per second

    @@ -45632,20 +50439,20 @@

    node_volume_write_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/volume.yamlapi/cluster/counter/tables/object_store_serverhead_bucket_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances volumewrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/volume.yamlperf-object-get-instances object_store_serverhead_bucket_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvme_lif_avg_latency

    -

    Average latency for NVMF operations

    +

    ontaps3_svm_head_bucket_total

    +

    Number of HEAD bucket operations

    @@ -45658,20 +50465,20 @@

    nvme_lif_avg_latencyUnit: microsec
    Type: average
    Base: total_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/nvmf_lif.yamlapi/cluster/counter/tables/object_store_serverhead_bucket_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_fc_lifavg_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yamlperf-object-get-instances object_store_serverhead_bucket_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvme_lif_avg_other_latency

    -

    Average latency for operations other than read, write, compare or compare-and-write.

    +

    ontaps3_svm_head_object_failed

    +

    Number of failed HEAD Object operations

    @@ -45684,20 +50491,20 @@

    nvme_lif_avg_other_latencyUnit: microsec
    Type: average
    Base: other_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/nvmf_lif.yamlapi/cluster/counter/tables/object_store_serverhead_object_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_fc_lifavg_other_latency
    Unit: microsec
    Type: average
    Base: other_ops
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yamlperf-object-get-instances object_store_serverhead_object_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvme_lif_avg_read_latency

    -

    Average latency for read operations

    +

    ontaps3_svm_head_object_failed_client_close

    +

    Number of times HEAD object operation failed due to the case where client closed the connection while the operation was still pending on server.

    @@ -45710,20 +50517,20 @@

    nvme_lif_avg_read_latencyUnit: microsec
    Type: average
    Base: read_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/nvmf_lif.yamlapi/cluster/counter/tables/object_store_serverhead_object_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_fc_lifavg_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yamlperf-object-get-instances object_store_serverhead_object_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvme_lif_avg_write_latency

    -

    Average latency for write operations

    +

    ontaps3_svm_head_object_latency

    +

    Average latency for HEAD object operations

    @@ -45736,20 +50543,20 @@

    nvme_lif_avg_write_latencyUnit: microsec
    Type: average
    Base: write_ops -

    + + + - - - + + +
    conf/restperf/9.12.0/nvmf_lif.yamlapi/cluster/counter/tables/object_store_serverhead_object_latency
    Unit: microsec
    Type: average
    Base: head_object_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_fc_lifavg_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yamlperf-object-get-instances object_store_serverhead_object_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: head_object_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvme_lif_other_ops

    -

    Number of operations that are not read, write, compare or compare-and-write.

    +

    ontaps3_svm_head_object_rate

    +

    Number of HEAD Object operations per second

    @@ -45762,20 +50569,20 @@

    nvme_lif_other_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nvmf_lif.yamlapi/cluster/counter/tables/object_store_serverhead_object_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_fc_lifother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yamlperf-object-get-instances object_store_serverhead_object_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvme_lif_read_data

    -

    Amount of data read from the storage system

    +

    ontaps3_svm_head_object_total

    +

    Number of HEAD Object operations

    @@ -45788,20 +50595,20 @@

    nvme_lif_read_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nvmf_lif.yamlapi/cluster/counter/tables/object_store_serverhead_object_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_fc_lifread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yamlperf-object-get-instances object_store_serverhead_object_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvme_lif_read_ops

    -

    Number of read operations

    +

    ontaps3_svm_initiate_multipart_upload_failed

    +

    Number of failed Initiate Multipart Upload operations.

    @@ -45814,20 +50621,20 @@

    nvme_lif_read_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nvmf_lif.yamlapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_fc_lifread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yamlperf-object-get-instances object_store_serverinitiate_multipart_upload_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvme_lif_total_ops

    -

    Total number of operations.

    +

    ontaps3_svm_initiate_multipart_upload_failed_client_close

    +

    Number of times Initiate Multipart Upload operation failed because client terminated connection while the operation was still pending on server.

    @@ -45840,20 +50647,20 @@

    nvme_lif_total_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nvmf_lif.yamlapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_fc_liftotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yamlperf-object-get-instances object_store_serverinitiate_multipart_upload_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvme_lif_write_data

    -

    Amount of data written to the storage system

    +

    ontaps3_svm_initiate_multipart_upload_latency

    +

    Average latency for Initiate Multipart Upload operations.

    @@ -45866,20 +50673,20 @@

    nvme_lif_write_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nvmf_lif.yamlapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_latency
    Unit: microsec
    Type: average
    Base: initiate_multipart_upload_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_fc_lifwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yamlperf-object-get-instances object_store_serverinitiate_multipart_upload_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: initiate_multipart_upload_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvme_lif_write_ops

    -

    Number of write operations

    +

    ontaps3_svm_initiate_multipart_upload_rate

    +

    Number of Initiate Multipart Upload operations per second.

    @@ -45892,20 +50699,20 @@

    nvme_lif_write_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nvmf_lif.yamlapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_fc_lifwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.10.1/nvmf_lif.yamlperf-object-get-instances object_store_serverinitiate_multipart_upload_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_rdma_port_avg_latency

    -

    Average latency for NVMF operations

    +

    ontaps3_svm_initiate_multipart_upload_total

    +

    Number of Initiate Multipart Upload operations.

    @@ -45918,20 +50725,20 @@

    nvmf_rdma_port_avg_latencyUnit: microsec
    Type: average
    Base: total_ops -

    + + + - - - + + +
    conf/restperf/9.14.1/nvmf_rdma_port.yamlapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_rdma_portavg_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yamlperf-object-get-instances object_store_serverinitiate_multipart_upload_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_rdma_port_avg_other_latency

    -

    Average latency for operations other than read, write, compare or compare-and-write

    +

    ontaps3_svm_input_flow_control_entry

    +

    Number of times input flow control was entered.

    @@ -45944,20 +50751,20 @@

nvmf_rdma_port_avg_other_latency

- REST | api/cluster/counter/tables/nvmf_rdma_port | average_other_latency | Unit: microsec | Type: average | Base: other_ops | conf/restperf/9.14.1/nvmf_rdma_port.yaml
+ REST | api/cluster/counter/tables/object_store_server | input_flow_control_entry | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
- ZAPI | perf-object-get-instances nvmf_rdma_port | avg_other_latency | Unit: microsec | Type: average | Base: other_ops | conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml
+ ZAPI | perf-object-get-instances object_store_server | input_flow_control_entry | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_rdma_port_avg_read_latency

    -

    Average latency for read operations

    +

    ontaps3_svm_input_flow_control_exit

    +

    Number of times input flow control was exited.

    @@ -45970,20 +50777,20 @@

    nvmf_rdma_port_avg_read_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_rdma_portaverage_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/restperf/9.14.1/nvmf_rdma_port.yamlapi/cluster/counter/tables/object_store_serverinput_flow_control_exit
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_rdma_portavg_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yamlperf-object-get-instances object_store_serverinput_flow_control_exit
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_rdma_port_avg_write_latency

    -

    Average latency for write operations

    +

    ontaps3_svm_list_buckets_failed

    +

    Number of failed LIST Buckets operations

    @@ -45996,20 +50803,20 @@

    nvmf_rdma_port_avg_write_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_rdma_portaverage_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/restperf/9.14.1/nvmf_rdma_port.yamlapi/cluster/counter/tables/object_store_serverlist_buckets_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_rdma_portavg_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yamlperf-object-get-instances object_store_serverlist_buckets_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_rdma_port_other_ops

    -

Number of operations that are not read, write, compare or compare-and-write.

    +

    ontaps3_svm_list_buckets_failed_client_close

    +

    Number of times LIST Bucket operation failed due to the case where client closed the connection while the operation was still pending on server.

    @@ -46022,20 +50829,20 @@

    nvmf_rdma_port_other_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_rdma_portother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_rdma_port.yamlapi/cluster/counter/tables/object_store_serverlist_buckets_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_rdma_portother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yamlperf-object-get-instances object_store_serverlist_buckets_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_rdma_port_read_data

    -

    Amount of data read from the storage system

    +

    ontaps3_svm_list_buckets_latency

    +

    Average latency for LIST Buckets operations

    @@ -46048,20 +50855,20 @@

    nvmf_rdma_port_read_data

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_rdma_portread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_rdma_port.yamlapi/cluster/counter/tables/object_store_serverlist_buckets_latency
    Unit: microsec
    Type: average
    Base: list_buckets_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_rdma_portread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yamlperf-object-get-instances object_store_serverlist_buckets_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: head_object_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_rdma_port_read_ops

    -

    Number of read operations

    +

    ontaps3_svm_list_buckets_rate

    +

    Number of LIST Buckets operations per second

    @@ -46074,20 +50881,20 @@

    nvmf_rdma_port_read_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_rdma_portread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_rdma_port.yamlapi/cluster/counter/tables/object_store_serverlist_buckets_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_rdma_portread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yamlperf-object-get-instances object_store_serverlist_buckets_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_rdma_port_total_data

    -

    Amount of NVMF traffic to and from the storage system

    +

    ontaps3_svm_list_buckets_total

    +

    Number of LIST Buckets operations

    @@ -46100,20 +50907,20 @@

    nvmf_rdma_port_total_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/nvmf_rdma_port.yamlapi/cluster/counter/tables/object_store_serverlist_buckets_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_rdma_porttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yamlperf-object-get-instances object_store_serverlist_buckets_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_rdma_port_total_ops

    -

    Total number of operations.

    +

    ontaps3_svm_list_object_versions_failed

    +

    Number of failed LIST object versions operations

    @@ -46126,20 +50933,20 @@

    nvmf_rdma_port_total_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_rdma_porttotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_rdma_port.yamlapi/cluster/counter/tables/object_store_serverlist_object_versions_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_rdma_porttotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yamlperf-object-get-instances object_store_serverlist_object_versions_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_rdma_port_write_data

    -

    Amount of data written to the storage system

    +

    ontaps3_svm_list_object_versions_failed_client_close

    +

    Number of times LIST object versions operation failed due to the case where client closed the connection while the operation was still pending on server.

    @@ -46152,20 +50959,20 @@

    nvmf_rdma_port_write_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/nvmf_rdma_port.yamlapi/cluster/counter/tables/object_store_serverlist_object_versions_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_rdma_portwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yamlperf-object-get-instances object_store_serverlist_object_versions_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_rdma_port_write_ops

    -

    Number of write operations

    +

    ontaps3_svm_list_object_versions_latency

    +

    Average latency for LIST Object versions operations

    @@ -46178,20 +50985,20 @@

    nvmf_rdma_port_write_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_rdma_portwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_rdma_port.yamlapi/cluster/counter/tables/object_store_serverlist_object_versions_latency
    Unit: microsec
    Type: average
    Base: list_object_versions_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_rdma_portwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yamlperf-object-get-instances object_store_serverlist_object_versions_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: list_object_versions_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_tcp_port_avg_latency

    -

    Average latency for NVMF operations

    +

    ontaps3_svm_list_object_versions_rate

    +

    Number of LIST Object Versions operations per second

    @@ -46204,20 +51011,20 @@

    nvmf_tcp_port_avg_latencyUnit: microsec
    Type: average
    Base: total_ops -

    + + + - - - + + +
    conf/restperf/9.14.1/nvmf_tcp_port.yamlapi/cluster/counter/tables/object_store_serverlist_object_versions_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_tcp_portavg_latency
    Unit: microsec
    Type: average
    Base: total_ops
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yamlperf-object-get-instances object_store_serverlist_object_versions_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_tcp_port_avg_other_latency

    -

    Average latency for operations other than read, write, compare or compare-and-write

    +

    ontaps3_svm_list_object_versions_total

    +

    Number of LIST Object Versions operations

    @@ -46230,20 +51037,20 @@

nvmf_tcp_port_avg_other_latency

- REST | api/cluster/counter/tables/nvmf_tcp_port | average_other_latency | Unit: microsec | Type: average | Base: other_ops | conf/restperf/9.14.1/nvmf_tcp_port.yaml
+ REST | api/cluster/counter/tables/object_store_server | list_object_versions_total | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
- ZAPI | perf-object-get-instances nvmf_tcp_port | avg_other_latency | Unit: microsec | Type: average | Base: other_ops | conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml
+ ZAPI | perf-object-get-instances object_store_server | list_object_versions_total | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_tcp_port_avg_read_latency

    -

    Average latency for read operations

    +

    ontaps3_svm_list_objects_failed

    +

    Number of failed LIST objects operations

    @@ -46256,20 +51063,20 @@

    nvmf_tcp_port_avg_read_latencyUnit: microsec
    Type: average
    Base: read_ops -

    + + + - - - + + +
    conf/restperf/9.14.1/nvmf_tcp_port.yamlapi/cluster/counter/tables/object_store_serverlist_objects_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_tcp_portavg_read_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yamlperf-object-get-instances object_store_serverlist_objects_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_tcp_port_avg_write_latency

    -

    Average latency for write operations

    +

    ontaps3_svm_list_objects_failed_client_close

    +

    Number of times LIST objects operation failed due to the case where client closed the connection while the operation was still pending on server.

    @@ -46282,20 +51089,20 @@

    nvmf_tcp_port_avg_write_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_tcp_portaverage_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/restperf/9.14.1/nvmf_tcp_port.yamlapi/cluster/counter/tables/object_store_serverlist_objects_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_tcp_portavg_write_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yamlperf-object-get-instances object_store_serverlist_objects_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_tcp_port_other_ops

    -

    Number of operations that are not read, write, compare or compare-and-write.

    +

    ontaps3_svm_list_objects_latency

    +

    Average latency for LIST Objects operations

    @@ -46308,20 +51115,20 @@

    nvmf_tcp_port_other_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_tcp_portother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_tcp_port.yamlapi/cluster/counter/tables/object_store_serverlist_objects_latency
    Unit: microsec
    Type: average
    Base: list_objects_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_tcp_portother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yamlperf-object-get-instances object_store_serverlist_objects_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: list_objects_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_tcp_port_read_data

    -

    Amount of data read from the storage system

    +

    ontaps3_svm_list_objects_rate

    +

    Number of LIST Objects operations per second

    @@ -46334,20 +51141,20 @@

    nvmf_tcp_port_read_data

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_tcp_portread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_tcp_port.yamlapi/cluster/counter/tables/object_store_serverlist_objects_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_tcp_portread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yamlperf-object-get-instances object_store_serverlist_objects_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_tcp_port_read_ops

    -

    Number of read operations

    +

    ontaps3_svm_list_objects_total

    +

    Number of LIST Objects operations

    @@ -46360,20 +51167,20 @@

    nvmf_tcp_port_read_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_tcp_portread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_tcp_port.yamlapi/cluster/counter/tables/object_store_serverlist_objects_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_tcp_portread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yamlperf-object-get-instances object_store_serverlist_objects_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_tcp_port_total_data

    -

    Amount of NVMF traffic to and from the storage system

    +

    ontaps3_svm_list_uploads_failed

    +

    Number of failed LIST Upload operations

    @@ -46386,20 +51193,20 @@

    nvmf_tcp_port_total_data

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_tcp_porttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_tcp_port.yamlapi/cluster/counter/tables/object_store_serverlist_uploads_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_tcp_porttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yamlperf-object-get-instances object_store_serverlist_uploads_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_tcp_port_total_ops

    -

    Total number of operations.

    +

    ontaps3_svm_list_uploads_failed_client_close

    +

    Number of times LIST Upload operation failed due to the case where client closed the connection while the operation was still pending on server.

    @@ -46412,20 +51219,20 @@

    nvmf_tcp_port_total_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_tcp_porttotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_tcp_port.yamlapi/cluster/counter/tables/object_store_serverlist_uploads_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_tcp_porttotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yamlperf-object-get-instances object_store_serverlist_uploads_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_tcp_port_write_data

    -

    Amount of data written to the storage system

    +

    ontaps3_svm_list_uploads_latency

    +

    Average latency for LIST Upload operations

    @@ -46438,20 +51245,20 @@

    nvmf_tcp_port_write_data

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_tcp_portwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_tcp_port.yamlapi/cluster/counter/tables/object_store_serverlist_uploads_latency
    Unit: microsec
    Type: average
    Base: list_uploads_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_tcp_portwrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yamlperf-object-get-instances object_store_serverlist_uploads_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: list_uploads_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    nvmf_tcp_port_write_ops

    -

    Number of write operations

    +

    ontaps3_svm_list_uploads_rate

    +

    Number of LIST Upload operations per second

    @@ -46464,20 +51271,20 @@

    nvmf_tcp_port_write_ops

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/nvmf_tcp_portwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/nvmf_tcp_port.yamlapi/cluster/counter/tables/object_store_serverlist_uploads_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nvmf_tcp_portwrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yamlperf-object-get-instances object_store_serverlist_uploads_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_logical_used_size

    -

    Specifies the bucket logical used size up to this point. This field cannot be specified using a POST or PATCH method.

    +

    ontaps3_svm_list_uploads_total

    +

    Number of LIST Upload operations

    @@ -46490,13 +51297,20 @@

    ontaps3_logical_used_sizeUnit: none
    Type: delta
    Base: +

    + + + + + +
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances object_store_serverlist_uploads_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_object_count

    +

    ontaps3_svm_max_cmds_per_connection

    +

    Maximum commands pipelined at any instance on a connection.

    @@ -46509,14 +51323,20 @@

    ontaps3_object_countUnit: none
    Type: delta
    Base: +

    + + + + + +
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances object_store_servermax_cmds_per_connection
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_size

    -

    Specifies the bucket size in bytes; ranges from 190MB to 62PB.

    +

    ontaps3_svm_max_connected_connections

    +

    Maximum number of object store server connections established at one time

    @@ -46529,14 +51349,20 @@

ontaps3_size

- REST | api/protocols/s3/buckets | size | conf/rest/9.7.0/ontap_s3.yaml
+ REST | api/cluster/counter/tables/object_store_server | maximum_connected_connections | Unit: none | Type: raw | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
+ ZAPI | perf-object-get-instances object_store_server | max_connected_connections | Unit: none | Type: raw,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_abort_multipart_upload_failed

    -

    Number of failed Abort Multipart Upload operations.

    +

    ontaps3_svm_max_requests_outstanding

    +

    Maximum number of object store server requests in process at one time

    @@ -46550,19 +51376,19 @@

ontaps3_svm_abort_multipart_upload_failed

- REST | api/cluster/counter/tables/object_store_server | abort_multipart_upload_failed | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
+ REST | api/cluster/counter/tables/object_store_server | maximum_requests_outstanding | Unit: none | Type: raw | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
- ZAPI | perf-object-get-instances object_store_server | abort_multipart_upload_failed | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
+ ZAPI | perf-object-get-instances object_store_server | max_requests_outstanding | Unit: none | Type: raw,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
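The incoming counter above is "raw"-type while the one it replaces in the diff is "delta"-type: raw values are point-in-time gauges published as collected, whereas delta values are differenced between polls. An illustrative sketch of that distinction (not Harvest's implementation):

```python
# Sketch contrasting a "raw" counter (used as-is each poll) with a "delta" counter.

def export_sample(counter_type: str, prev_val: int, curr_val: int) -> int:
    if counter_type == "raw":
        # e.g. maximum_requests_outstanding: a point-in-time value, published unchanged
        return curr_val
    if counter_type == "delta":
        # e.g. abort_multipart_upload_failed: cumulative, so publish the per-poll increase
        return curr_val - prev_val
    raise ValueError(f"unhandled counter type: {counter_type}")

print(export_sample("raw", 7, 9))     # 9
print(export_sample("delta", 7, 9))   # 2
```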
    -

    ontaps3_svm_abort_multipart_upload_failed_client_close

    -

    Number of times Abort Multipart Upload operation failed because client terminated connection while the operation was still pending on server.

    +

    ontaps3_svm_multi_delete_reqs

    +

    Total number of object store server multiple object delete requests

    @@ -46576,19 +51402,19 @@

    ontaps3_svm_abor

    - + - +
    REST api/cluster/counter/tables/object_store_serverabort_multipart_upload_failed_client_close
    Unit: none
    Type: delta
    Base:
    multiple_delete_requests
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverabort_multipart_upload_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    multi_delete_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_abort_multipart_upload_latency

    -

    Average latency for Abort Multipart Upload operations.

    +

    ontaps3_svm_output_flow_control_entry

    +

Number of times output flow control was entered.

    @@ -46602,19 +51428,19 @@

ontaps3_svm_abort_multipart_upload_latency

- REST | api/cluster/counter/tables/object_store_server | abort_multipart_upload_latency | Unit: microsec | Type: average | Base: abort_multipart_upload_total | conf/restperf/9.14.1/ontap_s3_svm.yaml
+ REST | api/cluster/counter/tables/object_store_server | output_flow_control_entry | Unit: none | Type: delta | Base: | conf/restperf/9.14.1/ontap_s3_svm.yaml
- ZAPI | perf-object-get-instances object_store_server | abort_multipart_upload_latency | Unit: microsec | Type: average,no-zero-values | Base: abort_multipart_upload_latency_base | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
+ ZAPI | perf-object-get-instances object_store_server | output_flow_control_entry | Unit: none | Type: delta,no-zero-values | Base: | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_abort_multipart_upload_rate

    -

    Number of Abort Multipart Upload operations per second.

    +

    ontaps3_svm_output_flow_control_exit

    +

    Number of times output flow control was exited.

    @@ -46628,19 +51454,19 @@

    ontaps3_svm_abort_multipart_upl

    - + - +
    REST api/cluster/counter/tables/object_store_serverabort_multipart_upload_rate
    Unit: per_sec
    Type: rate
    Base:
    output_flow_control_exit
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverabort_multipart_upload_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    output_flow_control_exit
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_abort_multipart_upload_total

    -

    Number of Abort Multipart Upload operations.

    +

    ontaps3_svm_presigned_url_reqs

    +

    Total number of presigned object store server URL requests.

    @@ -46654,19 +51480,19 @@

    ontaps3_svm_abort_multipart_up

    - + - +
    REST api/cluster/counter/tables/object_store_serverabort_multipart_upload_total
    Unit: none
    Type: delta
    Base:
    presigned_url_requests
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverabort_multipart_upload_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    presigned_url_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_allow_access

    -

    Number of times access was allowed.

    +

    ontaps3_svm_put_bucket_versioning_failed

    +

    Number of failed Put Bucket Versioning operations

    @@ -46680,19 +51506,19 @@

    ontaps3_svm_allow_access

    - + - +
    REST api/cluster/counter/tables/object_store_serverallow_access
    Unit: none
    Type: delta
    Base:
    put_bucket_versioning_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverallow_access
    Unit: none
    Type: delta,no-zero-values
    Base:
    put_bucket_versioning_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_anonymous_access

    -

    Number of times anonymous access was allowed.

    +

    ontaps3_svm_put_bucket_versioning_total

    +

    Number of Put Bucket Versioning operations.

    @@ -46706,19 +51532,19 @@

    ontaps3_svm_anonymous_accessUnit: none
    Type: delta
    Base: +

    - +
    put_bucket_versioning_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serveranonymous_access
    Unit: none
    Type: delta,no-zero-values
    Base:
    put_bucket_versioning_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_anonymous_deny_access

    -

    Number of times anonymous access was denied.

    +

    ontaps3_svm_put_data

    +

    Rate of PUT object data transfers per second

    @@ -46732,19 +51558,19 @@

    ontaps3_svm_anonymous_deny_access

    - + - +
    REST api/cluster/counter/tables/object_store_serveranonymous_deny_access
    Unit: none
    Type: delta
    Base:
    put_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serveranonymous_deny_access
    Unit: none
    Type: delta,no-zero-values
    Base:
    put_data
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_authentication_failures

    -

    Number of authentication failures.

    +

    ontaps3_svm_put_object_failed

    +

    Number of failed PUT object operations

    @@ -46758,19 +51584,19 @@

    ontaps3_svm_authentication_failures

    - + - +
    REST api/cluster/counter/tables/object_store_serverauthentication_failures
    Unit: none
    Type: delta
    Base:
    put_object_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverauthentication_failures
    Unit: none
    Type: delta,no-zero-values
    Base:
    put_object_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_chunked_upload_reqs

    -

    Total number of object store server chunked object upload requests

    +

    ontaps3_svm_put_object_failed_client_close

    +

    Number of times PUT object operation failed due to the case where client closed the connection while the operation was still pending on server.

    @@ -46784,19 +51610,19 @@

    ontaps3_svm_chunked_upload_reqs

    - + - +
    REST api/cluster/counter/tables/object_store_serverchunked_upload_requests
    Unit: none
    Type: delta
    Base:
    put_object_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverchunked_upload_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    put_object_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_complete_multipart_upload_failed

    -

    Number of failed Complete Multipart Upload operations.

    +

    ontaps3_svm_put_object_latency

    +

    Average latency for PUT object operations

    @@ -46810,19 +51636,19 @@

    ontaps3_svm_complete_multi

    - + - +
    REST api/cluster/counter/tables/object_store_servercomplete_multipart_upload_failed
    Unit: none
    Type: delta
    Base:
    put_object_latency
    Unit: microsec
    Type: average
    Base: put_object_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_servercomplete_multipart_upload_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    put_object_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: put_object_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_complete_multipart_upload_failed_client_close

    -

    Number of times Complete Multipart Upload operation failed because client terminated connection while the operation was still pending on server.

    +

    ontaps3_svm_put_object_rate

    +

    Number of PUT object operations per second

    @@ -46836,19 +51662,19 @@

    ontaps3_svm_c

    - + - +
    REST api/cluster/counter/tables/object_store_servercomplete_multipart_upload_failed_client_close
    Unit: none
    Type: delta
    Base:
    put_object_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_servercomplete_multipart_upload_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    put_object_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_complete_multipart_upload_latency

    -

    Average latency for Complete Multipart Upload operations.

    +

    ontaps3_svm_put_object_tagging_failed

    +

    Number of failed PUT object tagging operations.

    @@ -46862,19 +51688,19 @@

    ontaps3_svm_complete_mult

    - + - +
    REST api/cluster/counter/tables/object_store_servercomplete_multipart_upload_latency
    Unit: microsec
    Type: average
    Base: complete_multipart_upload_total
    put_object_tagging_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_servercomplete_multipart_upload_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: complete_multipart_upload_latency_base
    put_object_tagging_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_complete_multipart_upload_rate

    -

    Number of Complete Multipart Upload operations per second.

    +

    ontaps3_svm_put_object_tagging_failed_client_close

    +

    Number of times PUT object tagging operation failed because client terminated connection while the operation was still pending on server.

    @@ -46888,19 +51714,19 @@

    ontaps3_svm_complete_multipa

    - + - +
    REST api/cluster/counter/tables/object_store_servercomplete_multipart_upload_rate
    Unit: per_sec
    Type: rate
    Base:
    put_object_tagging_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_servercomplete_multipart_upload_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    put_object_tagging_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_complete_multipart_upload_total

    -

    Number of Complete Multipart Upload operations.

    +

    ontaps3_svm_put_object_tagging_latency

    +

    Average latency for PUT object tagging operations.

    @@ -46914,19 +51740,19 @@

    ontaps3_svm_complete_multip

    - + - +
    REST api/cluster/counter/tables/object_store_servercomplete_multipart_upload_total
    Unit: none
    Type: delta
    Base:
    put_object_tagging_latency
    Unit: microsec
    Type: average
    Base: put_object_tagging_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_servercomplete_multipart_upload_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    put_object_tagging_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: put_object_tagging_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_connected_connections

    -

    Number of object store server connections currently established

    +

    ontaps3_svm_put_object_tagging_rate

    +

    Number of PUT object tagging operations per second.

    @@ -46940,19 +51766,19 @@

    ontaps3_svm_connected_connections

    - + - +
    REST api/cluster/counter/tables/object_store_serverconnected_connections
    Unit: none
    Type: raw
    Base:
    put_object_tagging_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverconnected_connections
    Unit: none
    Type: raw,no-zero-values
    Base:
    put_object_tagging_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_connections

    -

    Total number of object store server connections.

    +

    ontaps3_svm_put_object_tagging_total

    +

    Number of PUT object tagging operations.

    @@ -46966,19 +51792,19 @@

    ontaps3_svm_connections

    - + - +
    REST api/cluster/counter/tables/object_store_serverconnections
    Unit: none
    Type: delta
    Base:
    put_object_tagging_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverconnections
    Unit: none
    Type: delta,no-zero-values
    Base:
    put_object_tagging_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_create_bucket_failed

    -

    Number of failed Create Bucket operations.

    +

    ontaps3_svm_put_object_total

    +

    Number of PUT object operations

    @@ -46992,19 +51818,19 @@

    ontaps3_svm_create_bucket_failed

    - + - +
    REST api/cluster/counter/tables/object_store_servercreate_bucket_failed
    Unit: none
    Type: delta
    Base:
    put_object_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_servercreate_bucket_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    put_object_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_create_bucket_failed_client_close

    -

    Number of times Create Bucket operation failed because client terminated connection while the operation was still pending on server.

    +

    ontaps3_svm_request_parse_errors

    +

    Number of request parser errors due to malformed requests.

    @@ -47018,19 +51844,19 @@

    ontaps3_svm_create_bucket

    - + - +
    REST api/cluster/counter/tables/object_store_servercreate_bucket_failed_client_close
    Unit: none
    Type: delta
    Base:
    request_parse_errors
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_servercreate_bucket_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    request_parse_errors
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_create_bucket_latency

    -

    Average latency for Create Bucket operations.

    +

    ontaps3_svm_requests

    +

    Total number of object store server requests

    @@ -47044,19 +51870,19 @@

    ontaps3_svm_create_bucket_latency

    - + - +
    REST api/cluster/counter/tables/object_store_servercreate_bucket_latency
    Unit: microsec
    Type: average
    Base: create_bucket_total
    requests
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_servercreate_bucket_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_bucket_latency_base
    requests
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_create_bucket_rate

    -

    Number of Create Bucket operations per second.

    +

    ontaps3_svm_requests_outstanding

    +

    Number of object store server requests in process

    @@ -47070,19 +51896,19 @@

    ontaps3_svm_create_bucket_rateUnit: per_sec
    Type: rate
    Base: +

    - +
    requests_outstanding
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_servercreate_bucket_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    requests_outstanding
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_create_bucket_total

    -

    Number of Create Bucket operations.

    +

    ontaps3_svm_root_user_access

    +

    Number of times access was done by root user.

    @@ -47096,19 +51922,19 @@

    ontaps3_svm_create_bucket_total

    - + - +
    REST api/cluster/counter/tables/object_store_servercreate_bucket_total
    Unit: none
    Type: delta
    Base:
    root_user_access
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_servercreate_bucket_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    root_user_access
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_default_deny_access

    -

    Number of times access was denied by default and not through any policy statement.

    +

    ontaps3_svm_server_connection_close

    +

    Number of connection closes triggered by server due to fatal errors.

    @@ -47122,19 +51948,19 @@

    ontaps3_svm_default_deny_access

    - + - +
    REST api/cluster/counter/tables/object_store_serverdefault_deny_access
    Unit: none
    Type: delta
    Base:
    server_connection_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverdefault_deny_access
    Unit: none
    Type: delta,no-zero-values
    Base:
    server_connection_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_delete_bucket_failed

    -

    Number of failed Delete Bucket operations.

    +

    ontaps3_svm_signature_v2_reqs

    +

    Total number of object store server signature V2 requests

    @@ -47148,19 +51974,19 @@

    ontaps3_svm_delete_bucket_failed

    - + - +
    REST api/cluster/counter/tables/object_store_serverdelete_bucket_failed
    Unit: none
    Type: delta
    Base:
    signature_v2_requests
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverdelete_bucket_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    signature_v2_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_delete_bucket_failed_client_close

    -

Number of times a Delete Bucket operation failed because the client terminated the connection while the operation was still pending on the server.

    +

    ontaps3_svm_signature_v4_reqs

    +

    Total number of object store server signature V4 requests

    @@ -47174,19 +52000,19 @@

    ontaps3_svm_delete_bucket

    - + - +
    REST api/cluster/counter/tables/object_store_serverdelete_bucket_failed_client_close
    Unit: none
    Type: delta
    Base:
    signature_v4_requests
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverdelete_bucket_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    signature_v4_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_delete_bucket_latency

    -

    Average latency for Delete Bucket operations.

    +

    ontaps3_svm_tagging

    +

    Number of requests with tagging specified.

    @@ -47200,19 +52026,19 @@

    ontaps3_svm_delete_bucket_latency

    - + - +
    REST api/cluster/counter/tables/object_store_serverdelete_bucket_latency
    Unit: microsec
    Type: average
    Base: delete_bucket_total
    tagging
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverdelete_bucket_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delete_bucket_latency_base
    tagging
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_delete_bucket_rate

    -

    Number of Delete Bucket operations per second.

    +

    ontaps3_svm_upload_part_failed

    +

    Number of failed Upload Part operations.

    @@ -47226,19 +52052,19 @@

ontaps3_svm_delete_bucket_rate
Unit: per_sec
    Type: rate
    Base: +

    - +
    upload_part_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverdelete_bucket_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    upload_part_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_delete_bucket_total

    -

    Number of Delete Bucket operations.

    +

    ontaps3_svm_upload_part_failed_client_close

    +

Number of times an Upload Part operation failed because the client terminated the connection while the operation was still pending on the server.

    @@ -47252,19 +52078,19 @@

    ontaps3_svm_delete_bucket_total

    - + - +
    REST api/cluster/counter/tables/object_store_serverdelete_bucket_total
    Unit: none
    Type: delta
    Base:
    upload_part_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverdelete_bucket_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    upload_part_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_delete_object_failed

    -

    Number of failed DELETE object operations

    +

    ontaps3_svm_upload_part_latency

    +

    Average latency for Upload Part operations.

    @@ -47278,19 +52104,19 @@

    ontaps3_svm_delete_object_failed

    - + - +
    REST api/cluster/counter/tables/object_store_serverdelete_object_failed
    Unit: none
    Type: delta
    Base:
    upload_part_latency
    Unit: microsec
    Type: average
    Base: upload_part_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverdelete_object_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    upload_part_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: upload_part_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_delete_object_failed_client_close

    -

Number of times a DELETE object operation failed because the client closed the connection while the operation was still pending on the server.

    +

    ontaps3_svm_upload_part_rate

    +

    Number of Upload Part operations per second.

    @@ -47304,19 +52130,19 @@

    ontaps3_svm_delete_object

    - + - +
    REST api/cluster/counter/tables/object_store_serverdelete_object_failed_client_close
    Unit: none
    Type: delta
    Base:
    upload_part_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverdelete_object_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    upload_part_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_delete_object_latency

    -

    Average latency for DELETE object operations

    +

    ontaps3_svm_upload_part_total

    +

    Number of Upload Part operations.

    @@ -47330,19 +52156,19 @@

    ontaps3_svm_delete_object_latency

    - + - +
    REST api/cluster/counter/tables/object_store_serverdelete_object_latency
    Unit: microsec
    Type: average
    Base: delete_object_total
    upload_part_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPI perf-object-get-instances object_store_serverdelete_object_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delete_object_latency_base
    upload_part_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    ontaps3_svm_delete_object_rate

    -

    Number of DELETE object operations per second

    +

    ontaps3_used_percent

    +

The used_percent metric is the percentage of a bucket's total capacity that is currently being used.
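As an illustration only (not part of the counter documentation), this percentage can be derived from the two REST fields listed for this metric, logical_used_size and size from api/protocols/s3/buckets; the helper below is a hypothetical sketch:

    def bucket_used_percent(logical_used_size: int, size: int) -> float:
        # Guard against an empty or unreported bucket size.
        if size <= 0:
            return 0.0
        return 100.0 * logical_used_size / size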

    @@ -47355,20 +52181,14 @@

ontaps3_svm_delete_object_rate
Unit: per_sec
    Type: rate
    Base: -

    - - - - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances object_store_serverdelete_object_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
api/protocols/s3/buckets
logical_used_size, size
conf/rest/9.7.0/ontap_s3.yaml
    -

    ontaps3_svm_delete_object_tagging_failed

    -

    Number of failed DELETE object tagging operations.

    +

    path_read_data

    +

    The average read throughput in kilobytes per second read from the indicated target port by the controller.

    @@ -47381,20 +52201,20 @@

    ontaps3_svm_delete_object_tagg

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverdelete_object_tagging_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/pathread_data
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/path.yaml
    ZAPIperf-object-get-instances object_store_serverdelete_object_tagging_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances pathread_data
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/path.yaml
    -

    ontaps3_svm_delete_object_tagging_failed_client_close

    -

Number of times a DELETE object tagging operation failed because the client terminated the connection while the operation was still pending on the server.

    +

    path_read_iops

    +

    The number of I/O read operations sent from the initiator port to the indicated target port.

    @@ -47407,20 +52227,20 @@

    ontaps3_svm_delet

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverdelete_object_tagging_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/pathread_iops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/path.yaml
    ZAPIperf-object-get-instances object_store_serverdelete_object_tagging_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances pathread_iops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/path.yaml
    -

    ontaps3_svm_delete_object_tagging_latency

    -

    Average latency for DELETE object tagging operations.

    +

    path_read_latency

    +

    The average latency of I/O read operations sent from this controller to the indicated target port.

    @@ -47433,20 +52253,20 @@

    ontaps3_svm_delete_object_tag

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverdelete_object_tagging_latency
    Unit: microsec
    Type: average
    Base: delete_object_tagging_total
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/pathread_latency
    Unit: microsec
    Type: average
    Base: read_iops
    conf/restperf/9.12.0/path.yaml
    ZAPIperf-object-get-instances object_store_serverdelete_object_tagging_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delete_object_tagging_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances pathread_latency
    Unit: microsec
    Type: average
    Base: read_iops
    conf/zapiperf/cdot/9.8.0/path.yaml
    -

    ontaps3_svm_delete_object_tagging_rate

    -

    Number of DELETE object tagging operations per second.

    +

    path_total_data

    +

    The average throughput in kilobytes per second read and written from/to the indicated target port by the controller.

    @@ -47459,20 +52279,20 @@

    ontaps3_svm_delete_object_taggin

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverdelete_object_tagging_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/pathtotal_data
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/path.yaml
    ZAPIperf-object-get-instances object_store_serverdelete_object_tagging_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances pathtotal_data
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/path.yaml
    -

    ontaps3_svm_delete_object_tagging_total

    -

    Number of DELETE object tagging operations.

    +

    path_total_iops

    +

    The number of total read/write I/O operations sent from the initiator port to the indicated target port.

    @@ -47485,20 +52305,20 @@

    ontaps3_svm_delete_object_taggi

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverdelete_object_tagging_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/pathtotal_iops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/path.yaml
    ZAPIperf-object-get-instances object_store_serverdelete_object_tagging_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances pathtotal_iops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/path.yaml
    -

    ontaps3_svm_delete_object_total

    -

    Number of DELETE object operations

    +

    path_write_data

    +

    The average write throughput in kilobytes per second written to the indicated target port by the controller.

    @@ -47511,20 +52331,20 @@

    ontaps3_svm_delete_object_total

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverdelete_object_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/pathwrite_data
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/path.yaml
    ZAPIperf-object-get-instances object_store_serverdelete_object_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances pathwrite_data
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/path.yaml
    -

    ontaps3_svm_explicit_deny_access

    -

    Number of times access was denied explicitly by a policy statement.

    +

    path_write_iops

    +

    The number of I/O write operations sent from the initiator port to the indicated target port.

    @@ -47537,20 +52357,20 @@

    ontaps3_svm_explicit_deny_access

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverexplicit_deny_access
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/pathwrite_iops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/path.yaml
    ZAPIperf-object-get-instances object_store_serverexplicit_deny_access
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances pathwrite_iops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/path.yaml
    -

    ontaps3_svm_get_bucket_acl_failed

    -

    Number of failed GET Bucket ACL operations

    +

    path_write_latency

    +

    The average latency of I/O write operations sent from this controller to the indicated target port.

    @@ -47563,20 +52383,20 @@

    ontaps3_svm_get_bucket_acl_failed

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverget_bucket_acl_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/pathwrite_latency
    Unit: microsec
    Type: average
    Base: write_iops
    conf/restperf/9.12.0/path.yaml
    ZAPIperf-object-get-instances object_store_serverget_bucket_acl_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances pathwrite_latency
    Unit: microsec
    Type: average
    Base: write_iops
    conf/zapiperf/cdot/9.8.0/path.yaml
    -

    ontaps3_svm_get_bucket_acl_total

    -

    Number of GET Bucket ACL operations

    +

    plex_disk_busy

    +

    The utilization percent of the disk. plex_disk_busy is disk_busy aggregated by plex.

    @@ -47589,20 +52409,20 @@

    ontaps3_svm_get_bucket_acl_total

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverget_bucket_acl_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentdisk_busy_percent
    Unit: percent
    Type: percent
    Base: base_for_disk_busy
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_bucket_acl_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentdisk_busy
    Unit: percent
    Type: percent
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_bucket_versioning_failed

    -

    Number of failed Get Bucket Versioning operations

    +

    plex_disk_capacity

    +

    Disk capacity in MB. plex_disk_capacity is disk_capacity aggregated by plex.

    @@ -47615,20 +52435,20 @@

    ontaps3_svm_get_bucket_version

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverget_bucket_versioning_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentcapacity
    Unit: mb
    Type: raw
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_bucket_versioning_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentdisk_capacity
    Unit: mb
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_bucket_versioning_total

    -

    Number of Get Bucket Versioning operations.

    +

    plex_disk_cp_read_chain

    +

    Average number of blocks transferred in each consistency point read operation during a CP. plex_disk_cp_read_chain is disk_cp_read_chain aggregated by plex.

    @@ -47641,20 +52461,20 @@

    ontaps3_svm_get_bucket_versioni

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverget_bucket_versioning_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentcp_read_chain
    Unit: none
    Type: average
    Base: cp_read_count
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_bucket_versioning_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentcp_read_chain
    Unit: none
    Type: average
    Base: cp_reads
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_data

    -

    Rate of GET object data transfers per second

    +

    plex_disk_cp_read_latency

    +

    Average latency per block in microseconds for consistency point read operations. plex_disk_cp_read_latency is disk_cp_read_latency aggregated by plex.

    @@ -47667,20 +52487,20 @@

ontaps3_svm_get_data
Unit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentcp_read_latency
    Unit: microsec
    Type: average
    Base: cp_read_blocks
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_data
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentcp_read_latency
    Unit: microsec
    Type: average
    Base: cp_read_blocks
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_object_acl_failed

    -

    Number of failed GET Object ACL operations

    +

    plex_disk_cp_reads

    +

    Number of disk read operations initiated each second for consistency point processing. plex_disk_cp_reads is disk_cp_reads aggregated by plex.

    @@ -47693,20 +52513,20 @@

    ontaps3_svm_get_object_acl_failed

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverget_object_acl_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentcp_read_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_object_acl_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentcp_reads
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_object_acl_total

    -

    Number of GET Object ACL operations

    +

    plex_disk_io_pending

    +

    Average number of I/Os issued to the disk for which we have not yet received the response. plex_disk_io_pending is disk_io_pending aggregated by plex.

    @@ -47719,20 +52539,20 @@

    ontaps3_svm_get_object_acl_total

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverget_object_acl_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentio_pending
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_object_acl_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentio_pending
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_object_failed

    -

    Number of failed GET object operations

    +

    plex_disk_io_queued

    +

    Number of I/Os queued to the disk but not yet issued. plex_disk_io_queued is disk_io_queued aggregated by plex.

    @@ -47745,20 +52565,20 @@

ontaps3_svm_get_object_failed
Unit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentio_queued
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_object_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentio_queued
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_object_failed_client_close

    -

Number of times a GET object operation failed because the client closed the connection while the operation was still pending on the server.

    +

    plex_disk_total_data

    +

    Total throughput for user operations per second. plex_disk_total_data is disk_total_data aggregated by plex.

    @@ -47771,20 +52591,20 @@

    ontaps3_svm_get_object_faile

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverget_object_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituenttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_object_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituenttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_object_lastbyte_latency

    -

    Average last-byte latency for GET object operations

    +

    plex_disk_total_transfers

    +

    Total number of disk operations involving data transfer initiated per second. plex_disk_total_transfers is disk_total_transfers aggregated by plex.

    @@ -47797,20 +52617,20 @@

    ontaps3_svm_get_object_lastbyte

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverget_object_lastbyte_latency
    Unit: microsec
    Type: average
    Base: get_object_total
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituenttotal_transfer_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_object_lastbyte_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: get_object_lastbyte_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituenttotal_transfers
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_object_latency

    -

    Average first-byte latency for GET object operations

    +

    plex_disk_user_read_blocks

    +

    Number of blocks transferred for user read operations per second. plex_disk_user_read_blocks is disk_user_read_blocks aggregated by plex.

    @@ -47823,20 +52643,20 @@

ontaps3_svm_get_object_latency
Unit: microsec
    Type: average
    Base: get_object_total -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_read_block_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_object_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: get_object_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_read_blocks
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_object_rate

    -

    Number of GET object operations per second

    +

    plex_disk_user_read_chain

    +

    Average number of blocks transferred in each user read operation. plex_disk_user_read_chain is disk_user_read_chain aggregated by plex.

    @@ -47849,20 +52669,20 @@

ontaps3_svm_get_object_rate
Unit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_read_chain
    Unit: none
    Type: average
    Base: user_read_count
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_object_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_read_chain
    Unit: none
    Type: average
    Base: user_reads
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_object_tagging_failed

    -

    Number of failed GET object tagging operations

    +

    plex_disk_user_read_latency

    +

    Average latency per block in microseconds for user read operations. plex_disk_user_read_latency is disk_user_read_latency aggregated by plex.

    @@ -47875,20 +52695,20 @@

    ontaps3_svm_get_object_tagging_fa

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverget_object_tagging_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_read_latency
    Unit: microsec
    Type: average
    Base: user_read_block_count
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_object_tagging_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_read_latency
    Unit: microsec
    Type: average
    Base: user_read_blocks
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_object_tagging_failed_client_close

    -

Number of times a GET object tagging operation failed because the client closed the connection while the operation was still pending on the server.

    +

    plex_disk_user_reads

    +

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. plex_disk_user_reads is disk_user_reads aggregated by plex.

    @@ -47901,20 +52721,20 @@

    ontaps3_svm_get_obje

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverget_object_tagging_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_read_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_object_tagging_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_reads
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_object_tagging_latency

    -

    Average latency for GET object tagging operations

    +

    plex_disk_user_write_blocks

    +

    Number of blocks transferred for user write operations per second. plex_disk_user_write_blocks is disk_user_write_blocks aggregated by plex.

    @@ -47927,20 +52747,20 @@

    ontaps3_svm_get_object_tagging_l

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverget_object_tagging_latency
    Unit: microsec
    Type: average
    Base: get_object_tagging_total
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_write_block_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_object_tagging_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: get_object_tagging_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_write_blocks
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_object_tagging_rate

    -

    Number of GET object tagging operations per second

    +

    plex_disk_user_write_chain

    +

    Average number of blocks transferred in each user write operation. plex_disk_user_write_chain is disk_user_write_chain aggregated by plex.

    @@ -47953,20 +52773,20 @@

    ontaps3_svm_get_object_tagging_rate

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverget_object_tagging_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_write_chain
    Unit: none
    Type: average
    Base: user_write_count
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_object_tagging_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_write_chain
    Unit: none
    Type: average
    Base: user_writes
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_object_tagging_total

    -

    Number of GET object tagging operations

    +

    plex_disk_user_write_latency

    +

    Average latency per block in microseconds for user write operations. plex_disk_user_write_latency is disk_user_write_latency aggregated by plex.

    @@ -47979,20 +52799,20 @@

    ontaps3_svm_get_object_tagging_tot

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverget_object_tagging_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_write_latency
    Unit: microsec
    Type: average
    Base: user_write_block_count
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_object_tagging_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_write_latency
    Unit: microsec
    Type: average
    Base: user_write_blocks
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_get_object_total

    -

    Number of GET object operations

    +

    plex_disk_user_writes

    +

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. plex_disk_user_writes is disk_user_writes aggregated by plex.

    @@ -48005,20 +52825,20 @@

ontaps3_svm_get_object_total
Unit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_write_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverget_object_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_writes
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_group_policy_evaluated

    -

    Number of times group policies were evaluated.

    +

    qos_concurrency

    +

    This is the average number of concurrent requests for the workload.
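One informal way to reason about this counter, offered here only as a sketch and not necessarily how ONTAP derives it, is Little's law: average concurrency is roughly the operation rate multiplied by the average latency.

    def approx_concurrency(ops_per_sec: float, avg_latency_us: float) -> float:
        # Little's law sketch: requests in flight ~= arrival rate * time in system.
        return ops_per_sec * (avg_latency_us / 1_000_000.0)

For example, 2000 ops/s at 500 microseconds of average latency corresponds to roughly one request in flight.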

    @@ -48031,20 +52851,33 @@

    ontaps3_svm_group_policy_evaluated

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_servergroup_policy_evaluated
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_volumeconcurrency
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/workload_volume.yaml
    ZAPIperf-object-get-instances object_store_servergroup_policy_evaluated
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_volumeconcurrency
    Unit: none
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/workload_volume.yaml
    -

    ontaps3_svm_head_bucket_failed

    -

    Number of failed HEAD bucket operations

    +

    qos_detail_resource_latency

    +

This refers to the average latency for workloads within the subsystems of Data ONTAP. These subsystems are the various modules or components within the system that could contribute to delays or latency during data or task processing. The calculated latency includes both the processing time within the subsystem and the waiting time at that subsystem. Each subsystem's latency is described below; a rough sketch of the calculation follows the list.

    +
      +
    • frontend: Represents the delays in the network layer of ONTAP.
    • +
    • backend: Represents the delays in the data/WAFL layer of ONTAP.
    • +
• cluster: Represents delays caused by the cluster switches, cables, and adapters which physically connect clustered nodes. If the cluster interconnect component is in contention, it means high wait time for I/O requests at the cluster interconnect is impacting the latency of one or more workloads.
    • +
    • cp: Represents delays due to buffered write flushes, called consistency points (cp).
    • +
    • disk: Represents slowness due to attached hard drives or solid state drives.
    • +
• network: Note: these latencies typically apply only to SAN, not NAS. Represents the wait time of I/O requests by the external networking protocols on the cluster. The wait time is time spent waiting for transfer-ready transactions to finish before the cluster can respond to an I/O request. If the network component is in contention, it means high wait time at the protocol layer is impacting the latency of one or more workloads.
    • +
    • nvlog: Represents delays due to mirroring writes to the NVRAM/NVLOG memory and to the HA partner NVRAM/NVLOG memory.
    • +
    • suspend: Represents delays due to operations suspending on a delay mechanism. Typically this is diagnosed by NetApp Support.
    • +
    • throttle: Represents the throughput maximum (ceiling) setting of the storage Quality of Service (QoS) policy group assigned to the workload. If the policy group component is in contention, it means all workloads in the policy group are being throttled by the set throughput limit, which is impacting the latency of one or more of those workloads.
    • +
    • qos_min: Represents the latency to a workload that is being caused by QoS throughput floor (expected) setting assigned to other workloads. If the QoS floor set on certain workloads use the majority of the bandwidth to guarantee the promised throughput, other workloads will be throttled and see more latency.
    • +
    • cloud: Represents the software component in the cluster involved with I/O processing between the cluster and the cloud tier on which user data is stored. If the cloud latency component is in contention, it means that a large amount of reads from volumes that are hosted on the cloud tier are impacting the latency of one or more workloads.
    • +
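As referenced above, here is a rough sketch of the idea behind this counter: each subsystem's contribution is its processing time plus the time spent waiting at it, averaged over the workload's operations. The names below are illustrative assumptions, not actual Harvest counters.

    def resource_latency_us(service_time_us: float, wait_time_us: float, ops: float) -> float:
        # Processing time within the subsystem plus waiting time at the subsystem,
        # normalized by the number of workload operations in the interval.
        if ops <= 0:
            return 0.0
        return (service_time_us + wait_time_us) / ops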
    @@ -48057,20 +52890,20 @@

ontaps3_svm_head_bucket_failed
Unit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_detailHarvest generated
    Unit: microseconds
    Type: average
    Base: ops
    conf/restperf/9.12.0/workload_detail.yaml
    ZAPIperf-object-get-instances object_store_serverhead_bucket_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_detailHarvest generated
    Unit: microseconds
    Type: average
    Base: ops
    conf/zapiperf/9.12.0/workload_detail.yaml
    -

    ontaps3_svm_head_bucket_failed_client_close

    -

Number of times a HEAD bucket operation failed because the client closed the connection while the operation was still pending on the server.

    +

    qos_detail_service_time_latency

    +

This refers to the average service time for workloads within the subsystems of Data ONTAP. These subsystems are the various modules or components within the system that could contribute to delays or latency during data or task processing. This latency is the processing time within the subsystem.

    @@ -48083,20 +52916,20 @@

    ontaps3_svm_head_bucket_fai

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverhead_bucket_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_detailHarvest generated
    Unit: microseconds
    Type: average
    Base: ops
    conf/restperf/9.12.0/workload_detail.yaml
    ZAPIperf-object-get-instances object_store_serverhead_bucket_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_detailHarvest generated
    Unit: microseconds
    Type: average
    Base: ops
    conf/zapiperf/9.12.0/workload_detail.yaml
    -

    ontaps3_svm_head_bucket_latency

    -

    Average latency for HEAD bucket operations

    +

    qos_latency

    +

    This is the average response time for requests that were initiated by the workload.
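The tables in this document list such counters as Type: average with a Base counter (here, ops). A hedged sketch of how an average counter is typically evaluated from two successive samples, as the delta of the latency accumulator divided by the delta of its base counter, is shown below; the variable names are illustrative.

    def average_latency_us(prev_latency_sum: float, curr_latency_sum: float,
                           prev_ops: float, curr_ops: float) -> float:
        # average = delta(latency accumulator) / delta(base counter)
        d_ops = curr_ops - prev_ops
        if d_ops <= 0:
            return 0.0
        return (curr_latency_sum - prev_latency_sum) / d_ops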

    @@ -48109,20 +52942,20 @@

    ontaps3_svm_head_bucket_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverhead_bucket_latency
    Unit: microsec
    Type: average
    Base: head_bucket_total
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_volumelatency
    Unit: microsec
    Type: average
    Base: ops
    conf/restperf/9.12.0/workload_volume.yaml
    ZAPIperf-object-get-instances object_store_serverhead_bucket_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: head_bucket_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_volumelatency
    Unit: microsec
    Type: average,no-zero-values
    Base: ops
    conf/zapiperf/cdot/9.8.0/workload_volume.yaml
    -

    ontaps3_svm_head_bucket_rate

    -

    Number of HEAD bucket operations per second

    +

    qos_ops

    +

    This field is the workload's rate of operations that completed during the measurement interval; measured per second.

    @@ -48135,20 +52968,20 @@

ontaps3_svm_head_bucket_rate
Unit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_volumeops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/workload_volume.yaml
    ZAPIperf-object-get-instances object_store_serverhead_bucket_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_volumeops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/workload_volume.yaml
    -

    ontaps3_svm_head_bucket_total

    -

    Number of HEAD bucket operations

    +

    qos_other_ops

    +

    This is the rate of this workload's other operations that completed during the measurement interval.

    @@ -48161,20 +52994,20 @@

ontaps3_svm_head_bucket_total
Unit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qosother_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/workload.yaml
    ZAPIperf-object-get-instances object_store_serverhead_bucket_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_volumeother_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/workload_volume.yaml
    -

    ontaps3_svm_head_object_failed

    -

    Number of failed HEAD Object operations

    +

    qos_read_data

    +

    This is the amount of data read per second from the filer by the workload.

    @@ -48187,20 +53020,20 @@

ontaps3_svm_head_object_failed
Unit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_volumeread_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/workload_volume.yaml
    ZAPIperf-object-get-instances object_store_serverhead_object_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_volumeread_data
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/workload_volume.yaml
    -

    ontaps3_svm_head_object_failed_client_close

    -

Number of times a HEAD object operation failed because the client closed the connection while the operation was still pending on the server.

    +

    qos_read_io_type

    +

    This is the percentage of read requests served from various components (such as buffer cache, ext_cache, disk, etc.).

    @@ -48213,20 +53046,20 @@

    ontaps3_svm_head_object_fai

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverhead_object_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_volumeread_io_type_percent
    Unit: percent
    Type: percent
    Base: read_io_type_base
    conf/restperf/9.12.0/workload_volume.yaml
    ZAPIperf-object-get-instances object_store_serverhead_object_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_volumeread_io_type
    Unit: percent
    Type: percent
    Base: read_io_type_base
    conf/zapiperf/cdot/9.8.0/workload_volume.yaml
    -

    ontaps3_svm_head_object_latency

    -

    Average latency for HEAD object operations

    +

    qos_read_latency

    +

    This is the average response time for read requests that were initiated by the workload.

    @@ -48239,20 +53072,20 @@

    ontaps3_svm_head_object_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverhead_object_latency
    Unit: microsec
    Type: average
    Base: head_object_total
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_volumeread_latency
    Unit: microsec
    Type: average
    Base: read_ops
    conf/restperf/9.12.0/workload_volume.yaml
    ZAPIperf-object-get-instances object_store_serverhead_object_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: head_object_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_volumeread_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_ops
    conf/zapiperf/cdot/9.8.0/workload_volume.yaml
    -

    ontaps3_svm_head_object_rate

    -

    Number of HEAD Object operations per second

    +

    qos_read_ops

    +

    This is the rate of this workload's read operations that completed during the measurement interval.

    @@ -48265,20 +53098,20 @@

ontaps3_svm_head_object_rate
Unit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_volumeread_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/workload_volume.yaml
    ZAPIperf-object-get-instances object_store_serverhead_object_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_volumeread_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/workload_volume.yaml
    -

    ontaps3_svm_head_object_total

    -

    Number of HEAD Object operations

    +

    qos_sequential_reads

    +

    This is the percentage of reads, performed on behalf of the workload, that were sequential.

    @@ -48291,20 +53124,20 @@

ontaps3_svm_head_object_total
Unit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_volumesequential_reads_percent
    Unit: percent
    Type: percent
    Base: sequential_reads_base
    conf/restperf/9.12.0/workload_volume.yaml
    ZAPIperf-object-get-instances object_store_serverhead_object_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_volumesequential_reads
    Unit: percent
    Type: percent,no-zero-values
    Base: sequential_reads_base
    conf/zapiperf/cdot/9.8.0/workload_volume.yaml
    -

    ontaps3_svm_initiate_multipart_upload_failed

    -

    Number of failed Initiate Multipart Upload operations.

    +

    qos_sequential_writes

    +

    This is the percentage of writes, performed on behalf of the workload, that were sequential. This counter is only available on platforms with more than 4GB of NVRAM.

    @@ -48317,20 +53150,20 @@

    ontaps3_svm_initiate_multi

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_volumesequential_writes_percent
    Unit: percent
    Type: percent
    Base: sequential_writes_base
    conf/restperf/9.12.0/workload_volume.yaml
    ZAPIperf-object-get-instances object_store_serverinitiate_multipart_upload_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_volumesequential_writes
    Unit: percent
    Type: percent,no-zero-values
    Base: sequential_writes_base
    conf/zapiperf/cdot/9.8.0/workload_volume.yaml
    -

    ontaps3_svm_initiate_multipart_upload_failed_client_close

    -

Number of times an Initiate Multipart Upload operation failed because the client terminated the connection while the operation was still pending on the server.

    +

    qos_total_data

    +

    This is the total amount of data read/written per second from/to the filer by the workload.

    @@ -48343,20 +53176,20 @@

    ontaps3_svm_i

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_volumetotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/workload_volume.yaml
    ZAPIperf-object-get-instances object_store_serverinitiate_multipart_upload_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_volumetotal_data
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/workload_volume.yaml
    -

    ontaps3_svm_initiate_multipart_upload_latency

    -

    Average latency for Initiate Multipart Upload operations.

    +

    qos_write_data

    +

    This is the amount of data written per second to the filer by the workload.

    @@ -48369,20 +53202,20 @@

    ontaps3_svm_initiate_mult

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_latency
    Unit: microsec
    Type: average
    Base: initiate_multipart_upload_total
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_volumewrite_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/workload_volume.yaml
    ZAPIperf-object-get-instances object_store_serverinitiate_multipart_upload_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: initiate_multipart_upload_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_volumewrite_data
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/workload_volume.yaml
    -

    ontaps3_svm_initiate_multipart_upload_rate

    -

    Number of Initiate Multipart Upload operations per second.

    +

    qos_write_latency

    +

    This is the average response time for write requests that were initiated by the workload.

    @@ -48395,20 +53228,20 @@

    ontaps3_svm_initiate_multipa

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_volumewrite_latency
    Unit: microsec
    Type: average
    Base: write_ops
    conf/restperf/9.12.0/workload_volume.yaml
    ZAPIperf-object-get-instances object_store_serverinitiate_multipart_upload_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_volumewrite_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_ops
    conf/zapiperf/cdot/9.8.0/workload_volume.yaml
    -

    ontaps3_svm_initiate_multipart_upload_total

    -

    Number of Initiate Multipart Upload operations.

    +

    qos_write_ops

    +

This is the rate of the workload's write operations that completed during the measurement interval, measured per second.

    @@ -48421,20 +53254,20 @@

    ontaps3_svm_initiate_multip

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qos_volumewrite_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/workload_volume.yaml
    ZAPIperf-object-get-instances object_store_serverinitiate_multipart_upload_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances workload_volumewrite_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/workload_volume.yaml
    -

    ontaps3_svm_input_flow_control_entry

    -

    Number of times input flow control was entered.

    +

    qtree_cifs_ops

    +

    Number of CIFS operations per second to the qtree

    @@ -48447,20 +53280,20 @@

    ontaps3_svm_input_flow_control_ent

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverinput_flow_control_entry
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qtreecifs_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverinput_flow_control_entry
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances qtreecifs_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_input_flow_control_exit

    -

    Number of times input flow control was exited.

    +

    qtree_id

    +

    The identifier for the qtree, unique within the qtree's volume.

    @@ -48473,20 +53306,14 @@

    ontaps3_svm_input_flow_control_exit

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverinput_flow_control_exit
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances object_store_serverinput_flow_control_exit
    Unit: none
    Type: delta,no-zero-values
    Base:
conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
api/storage/qtrees
id
conf/rest/9.12.0/qtree.yaml
    -

    ontaps3_svm_list_buckets_failed

    -

    Number of failed LIST Buckets operations

    +

    qtree_internal_ops

    +

Number of internal operations per second to the qtree, generated by activities such as SnapMirror and backup

    @@ -48499,20 +53326,20 @@

    ontaps3_svm_list_buckets_failed

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverlist_buckets_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qtreeinternal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverlist_buckets_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances qtreeinternal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_list_buckets_failed_client_close

    -

Number of times a LIST Buckets operation failed because the client closed the connection while the operation was still pending on the server.

    +

    qtree_nfs_ops

    +

    Number of NFS operations per second to the qtree

    @@ -48525,20 +53352,20 @@

    ontaps3_svm_list_buckets_f

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverlist_buckets_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qtreenfs_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverlist_buckets_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances qtreenfs_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_list_buckets_latency

    -

    Average latency for LIST Buckets operations

    +

    qtree_total_ops

    +

    Summation of NFS ops, CIFS ops, CSS ops and internal ops

    @@ -48551,20 +53378,20 @@

    ontaps3_svm_list_buckets_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverlist_buckets_latency
    Unit: microsec
    Type: average
    Base: list_buckets_total
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/qtreetotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverlist_buckets_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: head_object_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances qtreetotal_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_list_buckets_rate

    -

    Number of LIST Buckets operations per second

    +

    quota_disk_limit

    +

    Maximum amount of disk space, in kilobytes, allowed for the quota target (hard disk space limit). The value is -1 if the limit is unlimited.

    @@ -48577,20 +53404,20 @@

ontaps3_svm_list_buckets_rate
Unit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
conf/restperf/9.14.1/ontap_s3_svm.yaml
api/storage/quota/reports
space.hard_limit
conf/rest/9.12.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverlist_buckets_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
quota-report-iter
disk-limit
conf/zapi/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_list_buckets_total

    -

    Number of LIST Buckets operations

    +

    quota_disk_used

    +

    Current amount of disk space, in kilobytes, used by the quota target.

    @@ -48603,20 +53430,20 @@

ontaps3_svm_list_buckets_total
Unit: none
    Type: delta
    Base: -

    + + + - - - + + +
conf/restperf/9.14.1/ontap_s3_svm.yaml
api/storage/quota/reports
space.used.total
conf/rest/9.12.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverlist_buckets_total
    Unit: none
    Type: delta,no-zero-values
    Base:
conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
quota-report-iter
disk-used
conf/zapi/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_list_object_versions_failed

    -

    Number of failed LIST object versions operations

    +

    quota_disk_used_pct_disk_limit

    +

    Current disk space used expressed as a percentage of hard disk limit.
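
    quota_disk_used_pct_disk_limit arrives precomputed, but the same figure can be cross-checked from quota_disk_used and quota_disk_limit, keeping in mind that a limit of -1 means unlimited. A hedged PromQL sketch (label names are assumptions):

        # Disk usage as a percent of the hard limit, ignoring unlimited (-1) targets
        100 * quota_disk_used / (quota_disk_limit > 0)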

    @@ -48629,20 +53456,20 @@

    ontaps3_svm_list_object_version

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverlist_object_versions_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/storage/quota/reportsspace.used.hard_limit_percentconf/rest/9.12.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverlist_object_versions_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlquota-report-iterdisk-used-pct-disk-limitconf/zapi/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_list_object_versions_failed_client_close

    -

    Number of times LIST object versions operation failed due to the case where client closed the connection while the operation was still pending on server.

    +

    quota_disk_used_pct_soft_disk_limit

    +

    Current disk space used expressed as a percentage of soft disk limit.

    @@ -48655,20 +53482,20 @@

    ontaps3_svm_list_o

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverlist_object_versions_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/storage/quota/reportsspace.used.soft_limit_percentconf/rest/9.12.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverlist_object_versions_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlquota-report-iterdisk-used-pct-soft-disk-limitconf/zapi/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_list_object_versions_latency

    -

    Average latency for LIST Object versions operations

    +

    quota_disk_used_pct_threshold

    +

    Current disk space used expressed as a percentage of threshold.

    @@ -48680,21 +53507,15 @@

    ontaps3_svm_list_object_versio

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverlist_object_versions_latency
    Unit: microsec
    Type: average
    Base: list_object_versions_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances object_store_serverlist_object_versions_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: list_object_versions_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlquota-report-iterdisk-used-pct-thresholdconf/zapi/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_list_object_versions_rate

    -

    Number of LIST Object Versions operations per second

    +

    quota_file_limit

    +

    Maximum number of files allowed for the quota target (hard files limit). The value is -1 if the limit is unlimited.

    @@ -48707,20 +53528,20 @@

    ontaps3_svm_list_object_versions_

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverlist_object_versions_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/storage/quota/reportsfiles.hard_limitconf/rest/9.12.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverlist_object_versions_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlquota-report-iterfile-limitconf/zapi/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_list_object_versions_total

    -

    Number of LIST Object Versions operations

    +

    quota_files_used

    +

    Current number of files used by the quota target.

    @@ -48733,20 +53554,20 @@

    ontaps3_svm_list_object_versions

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverlist_object_versions_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/storage/quota/reportsfiles.used.totalconf/rest/9.12.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverlist_object_versions_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlquota-report-iterfiles-usedconf/zapi/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_list_objects_failed

    -

    Number of failed LIST objects operations

    +

    quota_files_used_pct_file_limit

    +

    Current number of files used expressed as a percentage of hard file limit.

    @@ -48759,20 +53580,20 @@

    ontaps3_svm_list_objects_failed

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverlist_objects_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/storage/quota/reportsfiles.used.hard_limit_percentconf/rest/9.12.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverlist_objects_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlquota-report-iterfiles-used-pct-file-limitconf/zapi/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_list_objects_failed_client_close

    -

    Number of times LIST objects operation failed due to the case where client closed the connection while the operation was still pending on server.

    +

    quota_files_used_pct_soft_file_limit

    +

    Current number of files used expressed as a percentage of soft file limit.

    @@ -48785,20 +53606,20 @@

    ontaps3_svm_list_objects_f

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverlist_objects_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/storage/quota/reportsfiles.used.soft_limit_percentconf/rest/9.12.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverlist_objects_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlquota-report-iterfiles-used-pct-soft-file-limitconf/zapi/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_list_objects_latency

    -

    Average latency for LIST Objects operations

    +

    quota_soft_disk_limit

    +

    Soft disk space limit, in kilobytes, for the quota target. The value is -1 if the limit is unlimited.

    @@ -48811,20 +53632,20 @@

    ontaps3_svm_list_objects_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverlist_objects_latency
    Unit: microsec
    Type: average
    Base: list_objects_total
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/storage/quota/reportsspace.soft_limitconf/rest/9.12.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverlist_objects_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: list_objects_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlquota-report-itersoft-disk-limitconf/zapi/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_list_objects_rate

    -

    Number of LIST Objects operations per second

    +

    quota_soft_file_limit

    +

    Soft file limit, in number of files, for the quota target. The value is -1 if the limit is unlimited.

    @@ -48837,20 +53658,20 @@

    ontaps3_svm_list_objects_rateUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/storage/quota/reportsfiles.soft_limitconf/rest/9.12.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverlist_objects_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlquota-report-itersoft-file-limitconf/zapi/cdot/9.8.0/qtree.yaml
    -

    ontaps3_svm_list_objects_total

    -

    Number of LIST Objects operations

    +

    quota_threshold

    +

    Disk space threshold, in kilobytes, for the quota target. The value is -1 if the limit is unlimited.

    @@ -48862,21 +53683,21 @@

    ontaps3_svm_list_objects_totalUnit: none
    Type: delta
    Base: -

    + + + + - - - - + + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlZAPIquota-report-iterthresholdconf/zapi/cdot/9.8.0/qtree.yaml
    ZAPIperf-object-get-instances object_store_serverlist_objects_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlRESTNAHarvest generatedconf/rest/9.12.0/qtree.yaml
    -

    ontaps3_svm_list_uploads_failed

    -

    Number of failed LIST Upload operations

    +

    raid_disk_busy

    +

    The utilization percent of the disk. raid_disk_busy is disk_busy aggregated by raid.
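
    Because raid_disk_busy is disk_busy rolled up per RAID group, it is a convenient handle for spotting hot RAID groups. A minimal PromQL sketch; the 80 percent threshold is illustrative, not a value from this page:

        # Ten busiest RAID groups by disk utilization (percent)
        topk(10, raid_disk_busy)

        # RAID groups above an illustrative 80% utilization threshold
        raid_disk_busy > 80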

    @@ -48889,20 +53710,20 @@

    ontaps3_svm_list_uploads_failed

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverlist_uploads_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentdisk_busy_percent
    Unit: percent
    Type: percent
    Base: base_for_disk_busy
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverlist_uploads_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentdisk_busy
    Unit: percent
    Type: percent
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_list_uploads_failed_client_close

    -

    Number of times LIST Upload operation failed due to the case where client closed the connection while the operation was still pending on server.

    +

    raid_disk_capacity

    +

    Disk capacity in MB. raid_disk_capacity is disk_capacity aggregated by raid.

    @@ -48915,20 +53736,20 @@

    ontaps3_svm_list_uploads_f

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverlist_uploads_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentcapacity
    Unit: mb
    Type: raw
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverlist_uploads_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentdisk_capacity
    Unit: mb
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_list_uploads_latency

    -

    Average latency for LIST Upload operations

    +

    raid_disk_cp_read_chain

    +

    Average number of blocks transferred in each consistency point read operation during a CP. raid_disk_cp_read_chain is disk_cp_read_chain aggregated by raid.

    @@ -48941,20 +53762,20 @@

    ontaps3_svm_list_uploads_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverlist_uploads_latency
    Unit: microsec
    Type: average
    Base: list_uploads_total
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentcp_read_chain
    Unit: none
    Type: average
    Base: cp_read_count
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverlist_uploads_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: list_uploads_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentcp_read_chain
    Unit: none
    Type: average
    Base: cp_reads
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_list_uploads_rate

    -

    Number of LIST Upload operations per second

    +

    raid_disk_cp_read_latency

    +

    Average latency per block in microseconds for consistency point read operations. raid_disk_cp_read_latency is disk_cp_read_latency aggregated by raid.

    @@ -48967,20 +53788,20 @@

    ontaps3_svm_list_uploads_rateUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentcp_read_latency
    Unit: microsec
    Type: average
    Base: cp_read_blocks
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverlist_uploads_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentcp_read_latency
    Unit: microsec
    Type: average
    Base: cp_read_blocks
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_list_uploads_total

    -

    Number of LIST Upload operations

    +

    raid_disk_cp_reads

    +

    Number of disk read operations initiated each second for consistency point processing. raid_disk_cp_reads is disk_cp_reads aggregated by raid.

    @@ -48993,20 +53814,20 @@

    ontaps3_svm_list_uploads_totalUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentcp_read_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverlist_uploads_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentcp_reads
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_max_cmds_per_connection

    -

    Maximum number of commands pipelined at any instant on a connection.

    +

    raid_disk_io_pending

    +

    Average number of I/Os issued to the disk for which we have not yet received the response. raid_disk_io_pending is disk_io_pending aggregated by raid.

    @@ -49019,20 +53840,20 @@

    ontaps3_svm_max_cmds_per_connection

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_servermaximum_commands_per_connection
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentio_pending
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_servermax_cmds_per_connection
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentio_pending
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_max_connected_connections

    -

    Maximum number of object store server connections established at one time

    +

    raid_disk_io_queued

    +

    Number of I/Os queued to the disk but not yet issued. raid_disk_io_queued is disk_io_queued aggregated by raid.

    @@ -49045,20 +53866,20 @@

    ontaps3_svm_max_connected_connect

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_servermaximum_connected_connections
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentio_queued
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_servermax_connected_connections
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentio_queued
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_max_requests_outstanding

    -

    Maximum number of object store server requests in process at one time

    +

    raid_disk_total_data

    +

    Total throughput for user operations per second. raid_disk_total_data is disk_total_data aggregated by raid.

    @@ -49071,20 +53892,20 @@

    ontaps3_svm_max_requests_outstandi

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_servermaximum_requests_outstanding
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituenttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_servermax_requests_outstanding
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituenttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_multi_delete_reqs

    -

    Total number of object store server multiple object delete requests

    +

    raid_disk_total_transfers

    +

    Total number of disk operations involving data transfer initiated per second. raid_disk_total_transfers is disk_total_transfers aggregated by raid.

    @@ -49097,20 +53918,20 @@

    ontaps3_svm_multi_delete_reqsUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituenttotal_transfer_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_servermulti_delete_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituenttotal_transfers
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_output_flow_control_entry

    -

    Number of times output flow control was entered.

    +

    raid_disk_user_read_blocks

    +

    Number of blocks transferred for user read operations per second. raid_disk_user_read_blocks is disk_user_read_blocks aggregated by raid.

    @@ -49123,20 +53944,20 @@

    ontaps3_svm_output_flow_control_e

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serveroutput_flow_control_entry
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_read_block_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serveroutput_flow_control_entry
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_read_blocks
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_output_flow_control_exit

    -

    Number of times output flow control was exited.

    +

    raid_disk_user_read_chain

    +

    Average number of blocks transferred in each user read operation. raid_disk_user_read_chain is disk_user_read_chain aggregated by raid.

    @@ -49149,20 +53970,20 @@

    ontaps3_svm_output_flow_control_ex

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serveroutput_flow_control_exit
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_read_chain
    Unit: none
    Type: average
    Base: user_read_count
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serveroutput_flow_control_exit
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_read_chain
    Unit: none
    Type: average
    Base: user_reads
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_presigned_url_reqs

    -

    Total number of presigned object store server URL requests.

    +

    raid_disk_user_read_latency

    +

    Average latency per block in microseconds for user read operations. raid_disk_user_read_latency is disk_user_read_latency aggregated by raid.

    @@ -49175,20 +53996,20 @@

    ontaps3_svm_presigned_url_reqsUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_read_latency
    Unit: microsec
    Type: average
    Base: user_read_block_count
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverpresigned_url_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_read_latency
    Unit: microsec
    Type: average
    Base: user_read_blocks
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_put_bucket_versioning_failed

    -

    Number of failed Put Bucket Versioning operations

    +

    raid_disk_user_reads

    +

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. raid_disk_user_reads is disk_user_reads aggregated by raid.

    @@ -49201,20 +54022,20 @@

    ontaps3_svm_put_bucket_version

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverput_bucket_versioning_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_read_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverput_bucket_versioning_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_reads
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_put_bucket_versioning_total

    -

    Number of Put Bucket Versioning operations.

    +

    raid_disk_user_write_blocks

    +

    Number of blocks transferred for user write operations per second. raid_disk_user_write_blocks is disk_user_write_blocks aggregated by raid.

    @@ -49227,20 +54048,20 @@

    ontaps3_svm_put_bucket_versioni

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverput_bucket_versioning_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_write_block_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverput_bucket_versioning_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_write_blocks
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_put_data

    -

    Rate of PUT object data transfers per second

    +

    raid_disk_user_write_chain

    +

    Average number of blocks transferred in each user write operation. raid_disk_user_write_chain is disk_user_write_chain aggregated by raid.

    @@ -49253,20 +54074,20 @@

    ontaps3_svm_put_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_write_chain
    Unit: none
    Type: average
    Base: user_write_count
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverput_data
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_write_chain
    Unit: none
    Type: average
    Base: user_writes
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_put_object_failed

    -

    Number of failed PUT object operations

    +

    raid_disk_user_write_latency

    +

    Average latency per block in microseconds for user write operations. raid_disk_user_write_latency is disk_user_write_latency aggregated by raid.

    @@ -49279,20 +54100,20 @@

    ontaps3_svm_put_object_failedUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_write_latency
    Unit: microsec
    Type: average
    Base: user_write_block_count
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverput_object_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_write_latency
    Unit: microsec
    Type: average
    Base: user_write_blocks
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_put_object_failed_client_close

    -

    Number of times PUT object operation failed due to the case where client closed the connection while the operation was still pending on server.

    +

    raid_disk_user_writes

    +

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. raid_disk_user_writes is disk_user_writes aggregated by raid.

    @@ -49305,20 +54126,20 @@

    ontaps3_svm_put_object_faile

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverput_object_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/disk:constituentuser_write_count
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverput_object_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances disk:constituentuser_writes
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_put_object_latency

    -

    Average latency for PUT object operations

    +

    rw_ctx_cifs_giveups

    +

    Array of the number of CIFS ops given up because they rewound more than a certain threshold, categorized by rewind reason.

    @@ -49330,21 +54151,15 @@

    ontaps3_svm_put_object_latencyUnit: microsec
    Type: average
    Base: put_object_total -

    - - - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances object_store_serverput_object_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: put_object_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances rw_ctxcifs_giveups
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/rwctx.yaml
    -

    ontaps3_svm_put_object_rate

    -

    Number of PUT object operations per second

    +

    rw_ctx_cifs_rewinds

    +

    Array of the number of rewinds for CIFS ops, categorized by reason.

    @@ -49356,21 +54171,15 @@

    ontaps3_svm_put_object_rateUnit: per_sec
    Type: rate
    Base: -

    - - - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances object_store_serverput_object_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances rw_ctxcifs_rewinds
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/rwctx.yaml
    -

    ontaps3_svm_put_object_tagging_failed

    -

    Number of failed PUT object tagging operations.

    +

    rw_ctx_nfs_giveups

    +

    Array of the number of NFS ops given up because they rewound more than a certain threshold, categorized by rewind reason.

    @@ -49382,21 +54191,15 @@

    ontaps3_svm_put_object_tagging_fa

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverput_object_tagging_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances object_store_serverput_object_tagging_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances rw_ctxnfs_giveups
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/rwctx.yaml
    -

    ontaps3_svm_put_object_tagging_failed_client_close

    -

    Number of times PUT object tagging operation failed because client terminated connection while the operation was still pending on server.

    +

    rw_ctx_nfs_rewinds

    +

    Array of the number of rewinds for NFS ops, categorized by reason.

    @@ -49408,21 +54211,15 @@

    ontaps3_svm_put_obje

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverput_object_tagging_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances object_store_serverput_object_tagging_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances rw_ctxnfs_rewinds
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/rwctx.yaml
    -

    ontaps3_svm_put_object_tagging_latency

    -

    Average latency for PUT object tagging operations.

    +

    rw_ctx_qos_flowcontrol

    +

    The number of times QoS limiting has enabled stream flow control.

    @@ -49434,21 +54231,15 @@

    ontaps3_svm_put_object_tagging_l

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverput_object_tagging_latency
    Unit: microsec
    Type: average
    Base: put_object_tagging_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances object_store_serverput_object_tagging_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: put_object_tagging_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances rw_ctxqos_flowcontrol
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/rwctx.yaml
    -

    ontaps3_svm_put_object_tagging_rate

    -

    Number of PUT object tagging operations per second.

    +

    rw_ctx_qos_rewinds

    +

    The number of restarts after a rewind because of QoS limiting.

    @@ -49460,21 +54251,15 @@

    ontaps3_svm_put_object_tagging_rate

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverput_object_tagging_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances object_store_serverput_object_tagging_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances rw_ctxqos_rewinds
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/rwctx.yaml
    -

    ontaps3_svm_put_object_tagging_total

    -

    Number of PUT object tagging operations.

    +

    security_audit_destination_port

    +

    The destination port used to forward the message.

    @@ -49486,21 +54271,14 @@

    ontaps3_svm_put_object_tagging_tot

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverput_object_tagging_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances object_store_serverput_object_tagging_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlcluster-log-forward-get-itercluster-log-forward-info.portconf/zapi/cdot/9.8.0/security_audit_dest.yaml
    -

    ontaps3_svm_put_object_total

    -

    Number of PUT object operations

    +

    security_certificate_expiry_time

    @@ -49513,20 +54291,20 @@

    ontaps3_svm_put_object_totalUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/private/cli/security/certificateexpirationconf/rest/9.12.0/security_certificate.yaml
    ZAPIperf-object-get-instances object_store_serverput_object_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlsecurity-certificate-get-itercertificate-info.expiration-dateconf/zapi/cdot/9.8.0/security_certificate.yaml
    -

    ontaps3_svm_request_parse_errors

    -

    Number of request parser errors due to malformed requests.

    +

    security_ssh_max_instances

    +

    Maximum possible simultaneous connections.

    @@ -49539,20 +54317,14 @@

    ontaps3_svm_request_parse_errors

    - - - - - - - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverrequest_parse_errors
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances object_store_serverrequest_parse_errors
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlapi/security/sshmax_instancesconf/rest/9.12.0/security_ssh.yaml
    -

    ontaps3_svm_requests

    -

    Total number of object store server requests

    +

    shelf_average_ambient_temperature

    +

    Average temperature of all ambient sensors for shelf in Celsius.

    @@ -49565,20 +54337,20 @@

    ontaps3_svm_requestsUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverrequests
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_requests_outstanding

    -

    Number of object store server requests in process

    +

    shelf_average_fan_speed

    +

    Average fan speed for shelf in rpm.

    @@ -49591,20 +54363,20 @@

    ontaps3_svm_requests_outstanding

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverrequests_outstanding
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverrequests_outstanding
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_root_user_access

    -

    Number of times access was done by root user.

    +

    shelf_average_temperature

    +

    Average temperature of all non-ambient sensors for shelf in Celsius.

    @@ -49617,20 +54389,20 @@

    ontaps3_svm_root_user_accessUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverroot_user_access
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_server_connection_close

    -

    Number of connection closes triggered by server due to fatal errors.

    +

    shelf_disk_count

    +

    Disk count in a shelf.

    @@ -49643,20 +54415,20 @@

    ontaps3_svm_server_connection_close

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverserver_connection_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/storage/shelvesdisk_countconf/rest/9.12.0/shelf.yaml
    ZAPIperf-object-get-instances object_store_serverserver_connection_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlstorage-shelf-info-get-iterstorage-shelf-info.disk-countconf/zapi/cdot/9.8.0/shelf.yaml
    -

    ontaps3_svm_signature_v2_reqs

    -

    Total number of object store server signature V2 requests

    +

    shelf_max_fan_speed

    +

    Maximum fan speed for shelf in rpm.

    @@ -49669,20 +54441,20 @@

    ontaps3_svm_signature_v2_reqsUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serversignature_v2_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_signature_v4_reqs

    -

    Total number of object store server signature V4 requests

    +

    shelf_max_temperature

    +

    Maximum temperature of all non-ambient sensors for shelf in Celsius.

    @@ -49695,20 +54467,20 @@

    ontaps3_svm_signature_v4_reqsUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serversignature_v4_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_tagging

    -

    Number of requests with tagging specified.

    +

    shelf_min_ambient_temperature

    +

    Minimum temperature of all ambient sensors for shelf in Celsius.

    @@ -49721,20 +54493,20 @@

    ontaps3_svm_taggingUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_servertagging
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_upload_part_failed

    -

    Number of failed Upload Part operations.

    +

    shelf_min_fan_speed

    +

    Minimum fan speed for shelf in rpm.

    @@ -49747,20 +54519,20 @@

    ontaps3_svm_upload_part_failedUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverupload_part_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_upload_part_failed_client_close

    -

    Number of times Upload Part operation failed because client terminated connection while the operation was still pending on server.

    +

    shelf_min_temperature

    +

    Minimum temperature of all non-ambient sensors for shelf in Celsius.
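
    The shelf temperature metrics above are Harvest-generated and lend themselves to simple environmental checks. A hedged PromQL sketch; the 45 Celsius threshold and any label names are assumptions for illustration only:

        # Shelves whose hottest non-ambient sensor exceeds an illustrative 45 Celsius
        shelf_max_temperature > 45

        # Per-shelf spread between hottest and coldest non-ambient sensors
        shelf_max_temperature - shelf_min_temperature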

    @@ -49773,20 +54545,20 @@

    ontaps3_svm_upload_part_fai

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverupload_part_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverupload_part_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_upload_part_latency

    -

    Average latency for Upload Part operations.

    +

    shelf_power

    +

    Power consumed by shelf in Watts.

    @@ -49799,20 +54571,20 @@

    ontaps3_svm_upload_part_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/object_store_serverupload_part_latency
    Unit: microsec
    Type: average
    Base: upload_part_total
    conf/restperf/9.14.1/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/restperf/9.12.0/disk.yaml
    ZAPIperf-object-get-instances object_store_serverupload_part_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: upload_part_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlNAHarvest generated
    Unit:
    Type:
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yaml
    -

    ontaps3_svm_upload_part_rate

    -

    Number of Upload Part operations per second.

    +

    smb2_close_latency

    +

    Average latency for SMB2_COM_CLOSE operations

    @@ -49825,20 +54597,20 @@

    ontaps3_svm_upload_part_rateUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/smb2close_latency
    Unit: microsec
    Type: average
    Base: close_ops
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances object_store_serverupload_part_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances smb2close_latency
    Unit: microsec
    Type: average
    Base: close_latency_base
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    ontaps3_svm_upload_part_total

    -

    Number of Upload Part operations.

    +

    smb2_close_latency_histogram

    +

    Latency histogram for SMB2_COM_CLOSE operations

    @@ -49851,20 +54623,20 @@

    ontaps3_svm_upload_part_totalUnit: none
    Type: delta
    Base: -

    + + + - - - + + +
    conf/restperf/9.14.1/ontap_s3_svm.yamlapi/cluster/counter/tables/smb2close_latency_histogram
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances object_store_serverupload_part_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yamlperf-object-get-instances smb2close_latency_histogram
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    ontaps3_used_percent

    -

    The used_percent metric shows the percentage of a bucket's total capacity that is currently being used.
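
    As a usage sketch, the precomputed percentage makes bucket capacity checks straightforward in PromQL; the 80 percent threshold is an illustrative assumption:

        # Buckets using more than an illustrative 80% of their capacity
        ontaps3_used_percent > 80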

    +

    smb2_close_ops

    +

    Number of SMB2_COM_CLOSE operations

    @@ -49877,14 +54649,20 @@

    ontaps3_used_percentUnit: per_sec
    Type: rate
    Base: +

    + + + + + +
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances smb2close_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    path_read_data

    -

    The average read throughput in kilobytes per second read from the indicated target port by the controller.

    +

    smb2_create_latency

    +

    Average latency for SMB2_COM_CREATE operations

    @@ -49897,20 +54675,20 @@

    path_read_dataUnit: kb_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/path.yamlapi/cluster/counter/tables/smb2create_latency
    Unit: microsec
    Type: average
    Base: create_ops
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances pathread_data
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/path.yamlperf-object-get-instances smb2create_latency
    Unit: microsec
    Type: average
    Base: create_latency_base
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    path_read_iops

    -

    The number of I/O read operations sent from the initiator port to the indicated target port.

    +

    smb2_create_latency_histogram

    +

    Latency histogram for SMB2_COM_CREATE operations

    @@ -49923,20 +54701,20 @@

    path_read_iopsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/path.yamlapi/cluster/counter/tables/smb2create_latency_histogram
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances pathread_iops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/path.yamlperf-object-get-instances smb2create_latency_histogram
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    path_read_latency

    -

    The average latency of I/O read operations sent from this controller to the indicated target port.

    +

    smb2_create_ops

    +

    Number of SMB2_COM_CREATE operations

    @@ -49949,20 +54727,20 @@

    path_read_latencyUnit: microsec
    Type: average
    Base: read_iops -

    + + + - - - + + +
    conf/restperf/9.12.0/path.yamlapi/cluster/counter/tables/smb2create_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances pathread_latency
    Unit: microsec
    Type: average
    Base: read_iops
    conf/zapiperf/cdot/9.8.0/path.yamlperf-object-get-instances smb2create_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    path_total_data

    -

    The average throughput in kilobytes per second read and written from/to the indicated target port by the controller.

    +

    smb2_lock_latency

    +

    Average latency for SMB2_COM_LOCK operations

    @@ -49975,20 +54753,20 @@

    path_total_dataUnit: kb_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/path.yamlapi/cluster/counter/tables/smb2lock_latency
    Unit: microsec
    Type: average
    Base: lock_ops
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances pathtotal_data
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/path.yamlperf-object-get-instances smb2lock_latency
    Unit: microsec
    Type: average
    Base: lock_latency_base
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    path_total_iops

    -

    The number of total read/write I/O operations sent from the initiator port to the indicated target port.

    +

    smb2_lock_latency_histogram

    +

    Latency histogram for SMB2_COM_LOCK operations

    @@ -50001,20 +54779,20 @@

    path_total_iopsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/path.yamlapi/cluster/counter/tables/smb2lock_latency_histogram
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances pathtotal_iops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/path.yamlperf-object-get-instances smb2lock_latency_histogram
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    path_write_data

    -

    The average write throughput in kilobytes per second written to the indicated target port by the controller.
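
    Since path_total_data is the combined read/write throughput per target port, it should roughly equal the sum of path_read_data and path_write_data documented above. A minimal PromQL sketch (the label identifying the target port is an assumption):

        # Combined per-path throughput reconstructed from the read and write counters (kb_per_sec)
        path_read_data + path_write_data

        # Ten paths with the highest total throughput
        topk(10, path_total_data)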

    +

    smb2_lock_ops

    +

    Number of SMB2_COM_LOCK operations

    @@ -50027,20 +54805,20 @@

    path_write_dataUnit: kb_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/path.yamlapi/cluster/counter/tables/smb2lock_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances pathwrite_data
    Unit: kb_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/path.yamlperf-object-get-instances smb2lock_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    path_write_iops

    -

    The number of I/O write operations sent from the initiator port to the indicated target port.

    +

    smb2_negotiate_latency

    +

    Average latency for SMB2_COM_NEGOTIATE operations

    @@ -50053,20 +54831,20 @@

    path_write_iopsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/path.yamlapi/cluster/counter/tables/smb2negotiate_latency
    Unit: microsec
    Type: average
    Base: negotiate_ops
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances pathwrite_iops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/path.yamlperf-object-get-instances smb2negotiate_latency
    Unit: microsec
    Type: average
    Base: negotiate_latency_base
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    path_write_latency

    -

    The average latency of I/O write operations sent from this controller to the indicated target port.

    +

    smb2_negotiate_ops

    +

    Number of SMB2_COM_NEGOTIATE operations

    @@ -50079,20 +54857,20 @@

    path_write_latencyUnit: microsec
    Type: average
    Base: write_iops -

    + + + - - - + + +
    conf/restperf/9.12.0/path.yamlapi/cluster/counter/tables/smb2negotiate_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances pathwrite_latency
    Unit: microsec
    Type: average
    Base: write_iops
    conf/zapiperf/cdot/9.8.0/path.yamlperf-object-get-instances smb2negotiate_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_busy

    -

    The utilization percent of the disk. plex_disk_busy is disk_busy aggregated by plex.

    +

    smb2_oplock_break_latency

    +

    Average latency for SMB2_COM_OPLOCK_BREAK operations

    @@ -50105,20 +54883,20 @@

    plex_disk_busyUnit: percent
    Type: percent
    Base: base_for_disk_busy -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/smb2oplock_break_latency
    Unit: microsec
    Type: average
    Base: oplock_break_ops
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances disk:constituentdisk_busy
    Unit: percent
    Type: percent
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances smb2oplock_break_latency
    Unit: microsec
    Type: average
    Base: oplock_break_latency_base
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_capacity

    -

    Disk capacity in MB. plex_disk_capacity is disk_capacity aggregated by plex.

    +

    smb2_oplock_break_latency_histogram

    +

    Latency histogram for SMB2_COM_OPLOCK_BREAK operations

    @@ -50131,20 +54909,20 @@

    plex_disk_capacityUnit: mb
    Type: raw
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/smb2oplock_break_latency_histogram
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances disk:constituentdisk_capacity
    Unit: mb
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances smb2oplock_break_latency_histogram
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_cp_read_chain

    -

    Average number of blocks transferred in each consistency point read operation during a CP. plex_disk_cp_read_chain is disk_cp_read_chain aggregated by plex.

    +

    smb2_oplock_break_ops

    +

    Number of SMB2_COM_OPLOCK_BREAK operations

    @@ -50157,20 +54935,20 @@

    plex_disk_cp_read_chain

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/disk:constituentcp_read_chain
    Unit: none
    Type: average
    Base: cp_read_count
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/smb2oplock_break_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances disk:constituentcp_read_chain
    Unit: none
    Type: average
    Base: cp_reads
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances smb2oplock_break_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_cp_read_latency

    -

    Average latency per block in microseconds for consistency point read operations. plex_disk_cp_read_latency is disk_cp_read_latency aggregated by plex.

    +

    smb2_query_directory_latency

    +

    Average latency for SMB2_COM_QUERY_DIRECTORY operations

    @@ -50183,20 +54961,20 @@

    plex_disk_cp_read_latencyUnit: microsec
    Type: average
    Base: cp_read_blocks -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/smb2query_directory_latency
    Unit: microsec
    Type: average
    Base: query_directory_ops
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances disk:constituentcp_read_latency
    Unit: microsec
    Type: average
    Base: cp_read_blocks
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances smb2query_directory_latency
    Unit: microsec
    Type: average
    Base: query_directory_latency_base
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_cp_reads

    -

    Number of disk read operations initiated each second for consistency point processing. plex_disk_cp_reads is disk_cp_reads aggregated by plex.

    +

    smb2_query_directory_latency_histogram

    +

    Latency histogram for SMB2_COM_QUERY_DIRECTORY operations

    @@ -50209,20 +54987,20 @@

    plex_disk_cp_readsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/smb2query_directory_latency_histogram
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances disk:constituentcp_reads
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances smb2query_directory_latency_histogram
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_io_pending

    -

    Average number of I/Os issued to the disk for which we have not yet received the response. plex_disk_io_pending is disk_io_pending aggregated by plex.

    +

    smb2_query_directory_ops

    +

    Number of SMB2_COM_QUERY_DIRECTORY operations

    @@ -50235,20 +55013,20 @@

    plex_disk_io_pendingUnit: none
    Type: average
    Base: base_for_disk_busy -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/smb2query_directory_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances disk:constituentio_pending
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances smb2query_directory_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_io_queued

    -

    Number of I/Os queued to the disk but not yet issued. plex_disk_io_queued is disk_io_queued aggregated by plex.

    +

    smb2_query_info_latency

    +

    Average latency for SMB2_COM_QUERY_INFO operations

    @@ -50261,20 +55039,20 @@

    plex_disk_io_queuedUnit: none
    Type: average
    Base: base_for_disk_busy -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/smb2query_info_latency
    Unit: microsec
    Type: average
    Base: query_info_ops
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances disk:constituentio_queued
    Unit: none
    Type: average
    Base: base_for_disk_busy
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances smb2query_info_latency
    Unit: microsec
    Type: average
    Base: query_info_latency_base
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_total_data

    -

    Total throughput for user operations per second. plex_disk_total_data is disk_total_data aggregated by plex.

    +

    smb2_query_info_latency_histogram

    +

    Latency histogram for SMB2_COM_QUERY_INFO operations

    @@ -50287,20 +55065,20 @@

    plex_disk_total_dataUnit: b_per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/smb2query_info_latency_histogram
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances disk:constituenttotal_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances smb2query_info_latency_histogram
    Unit: none
    Type: delta
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_total_transfers

    -

    Total number of disk operations involving data transfer initiated per second. plex_disk_total_transfers is disk_total_transfers aggregated by plex.

    +

    smb2_query_info_ops

    +

    Number of SMB2_COM_QUERY_INFO operations

    @@ -50313,20 +55091,20 @@

    plex_disk_total_transfersUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/disk.yamlapi/cluster/counter/tables/smb2query_info_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/smb2.yaml
    ZAPIperf-object-get-instances disk:constituenttotal_transfers
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/disk.yamlperf-object-get-instances smb2query_info_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_user_read_blocks

    -

    Number of blocks transferred for user read operations per second. plex_disk_user_read_blocks is disk_user_read_blocks aggregated by plex.

    +

    smb2_read_latency

    +

    Average latency for SMB2_COM_READ operations

@@ -50339,20 +55117,20 @@
- plex_disk_user_read_blocks (Unit: per_sec, Type: rate): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent user_read_blocks, conf/zapiperf/cdot/9.8.0/disk.yaml
+ smb2_read_latency: REST api/cluster/counter/tables/smb2 read_latency (Unit: microsec, Type: average, Base: read_ops), conf/restperf/9.14.1/smb2.yaml; ZAPI perf-object-get-instances smb2 read_latency (Unit: microsec, Type: average, Base: read_ops), conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_user_read_chain

    -

    Average number of blocks transferred in each user read operation. plex_disk_user_read_chain is disk_user_read_chain aggregated by plex.

    +

    smb2_read_ops

    +

    Number of SMB2_COM_READ operations

@@ -50365,20 +55143,20 @@
- plex_disk_user_read_chain (Unit: none, Type: average, Base: user_read_count): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent user_read_chain (Base: user_reads), conf/zapiperf/cdot/9.8.0/disk.yaml
+ smb2_read_ops: REST api/cluster/counter/tables/smb2 read_ops (Unit: per_sec, Type: rate), conf/restperf/9.14.1/smb2.yaml; ZAPI perf-object-get-instances smb2 read_ops (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_user_read_latency

    -

    Average latency per block in microseconds for user read operations. plex_disk_user_read_latency is disk_user_read_latency aggregated by plex.

    +

    smb2_session_setup_latency

    +

    Average latency for SMB2_COM_SESSION_SETUP operations

@@ -50391,20 +55169,20 @@
- plex_disk_user_read_latency (Unit: microsec, Type: average, Base: user_read_block_count): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent user_read_latency (Base: user_read_blocks), conf/zapiperf/cdot/9.8.0/disk.yaml
+ smb2_session_setup_latency: REST api/cluster/counter/tables/smb2 session_setup_latency (Unit: microsec, Type: average, Base: session_setup_ops), conf/restperf/9.14.1/smb2.yaml; ZAPI perf-object-get-instances smb2 session_setup_latency (Unit: microsec, Type: average, Base: session_setup_latency_base), conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_user_reads

    -

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. plex_disk_user_reads is disk_user_reads aggregated by plex.

    +

    smb2_session_setup_latency_histogram

    +

    Latency histogram for SMB2_COM_SESSION_SETUP operations

@@ -50417,20 +55195,20 @@
- plex_disk_user_reads (Unit: per_sec, Type: rate): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent user_reads, conf/zapiperf/cdot/9.8.0/disk.yaml
+ smb2_session_setup_latency_histogram: REST api/cluster/counter/tables/smb2 session_setup_latency_histogram (Unit: none, Type: delta), conf/restperf/9.14.1/smb2.yaml; ZAPI perf-object-get-instances smb2 session_setup_latency_histogram (Unit: none, Type: delta), conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_user_write_blocks

    -

    Number of blocks transferred for user write operations per second. plex_disk_user_write_blocks is disk_user_write_blocks aggregated by plex.

    +

    smb2_session_setup_ops

    +

    Number of SMB2_COM_SESSION_SETUP operations

@@ -50443,20 +55221,20 @@
- plex_disk_user_write_blocks (Unit: per_sec, Type: rate): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent user_write_blocks, conf/zapiperf/cdot/9.8.0/disk.yaml
+ smb2_session_setup_ops: REST api/cluster/counter/tables/smb2 session_setup_ops (Unit: per_sec, Type: rate), conf/restperf/9.14.1/smb2.yaml; ZAPI perf-object-get-instances smb2 session_setup_ops (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_user_write_chain

    -

    Average number of blocks transferred in each user write operation. plex_disk_user_write_chain is disk_user_write_chain aggregated by plex.

    +

    smb2_set_info_latency

    +

    Average latency for SMB2_COM_SET_INFO operations

@@ -50469,20 +55247,20 @@
- plex_disk_user_write_chain (Unit: none, Type: average, Base: user_write_count): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent user_write_chain (Base: user_writes), conf/zapiperf/cdot/9.8.0/disk.yaml
+ smb2_set_info_latency: REST api/cluster/counter/tables/smb2 set_info_latency (Unit: microsec, Type: average, Base: set_info_ops), conf/restperf/9.14.1/smb2.yaml; ZAPI perf-object-get-instances smb2 set_info_latency (Unit: microsec, Type: average, Base: set_info_latency_base), conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_user_write_latency

    -

    Average latency per block in microseconds for user write operations. plex_disk_user_write_latency is disk_user_write_latency aggregated by plex.

    +

    smb2_set_info_latency_histogram

    +

    Latency histogram for SMB2_COM_SET_INFO operations

@@ -50495,20 +55273,20 @@
- plex_disk_user_write_latency (Unit: microsec, Type: average, Base: user_write_block_count): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent user_write_latency (Base: user_write_blocks), conf/zapiperf/cdot/9.8.0/disk.yaml
+ smb2_set_info_latency_histogram: REST api/cluster/counter/tables/smb2 set_info_latency_histogram (Unit: none, Type: delta), conf/restperf/9.14.1/smb2.yaml; ZAPI perf-object-get-instances smb2 set_info_latency_histogram (Unit: none, Type: delta), conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    plex_disk_user_writes

    -

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. plex_disk_user_writes is disk_user_writes aggregated by plex.

    +

    smb2_set_info_ops

    +

    Number of SMB2_COM_SET_INFO operations

@@ -50521,20 +55299,20 @@
- plex_disk_user_writes (Unit: per_sec, Type: rate): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent user_writes, conf/zapiperf/cdot/9.8.0/disk.yaml
+ smb2_set_info_ops: REST api/cluster/counter/tables/smb2 set_info_ops (Unit: per_sec, Type: rate), conf/restperf/9.14.1/smb2.yaml; ZAPI perf-object-get-instances smb2 set_info_ops (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    qos_concurrency

    -

    This is the average number of concurrent requests for the workload.

    +

    smb2_tree_connect_latency

    +

    Average latency for SMB2_COM_TREE_CONNECT operations

@@ -50547,33 +55325,20 @@
- qos_concurrency (Unit: none, Type: rate): REST conf/restperf/9.12.0/workload_volume.yaml; ZAPI perf-object-get-instances workload_volume concurrency (Unit: none, Type: rate,no-zero-values), conf/zapiperf/cdot/9.8.0/workload_volume.yaml
+ smb2_tree_connect_latency: REST api/cluster/counter/tables/smb2 tree_connect_latency (Unit: microsec, Type: average, Base: tree_connect_ops), conf/restperf/9.14.1/smb2.yaml; ZAPI perf-object-get-instances smb2 tree_connect_latency (Unit: microsec, Type: average, Base: tree_connect_latency_base), conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    qos_detail_resource_latency

    -

This refers to the average latency for workloads within the subsystems of Data ONTAP. These subsystems are the various modules or components within the system that could contribute to delays or latency during data or task processing. The calculated latency includes both the processing time within the subsystem and the waiting time at that subsystem. The subsystems are described below; a short interpretation sketch follows the list.

-

- frontend: Represents the delays in the network layer of ONTAP.
- backend: Represents the delays in the data/WAFL layer of ONTAP.
- cluster: Represents delays caused by the cluster switches, cables, and adapters which physically connect clustered nodes. If the cluster interconnect component is in contention, it means high wait time for I/O requests at the cluster interconnect is impacting the latency of one or more workloads.
- cp: Represents delays due to buffered write flushes, called consistency points (cp).
- disk: Represents slowness due to attached hard drives or solid state drives.
- network: Note: Typically these latencies only apply to SAN not NAS. Represents the wait time of I/O requests by the external networking protocols on the cluster. The wait time is time spent waiting for transfer ready transactions to finish before the cluster can respond to an I/O request. If the network component is in contention, it means high wait time at the protocol layer is impacting the latency of one or more workloads.
- nvlog: Represents delays due to mirroring writes to the NVRAM/NVLOG memory and to the HA partner NVRAM/NVLOG memory.
- suspend: Represents delays due to operations suspending on a delay mechanism. Typically this is diagnosed by NetApp Support.
- throttle: Represents the throughput maximum (ceiling) setting of the storage Quality of Service (QoS) policy group assigned to the workload. If the policy group component is in contention, it means all workloads in the policy group are being throttled by the set throughput limit, which is impacting the latency of one or more of those workloads.
- qos_min: Represents the latency to a workload that is being caused by the QoS throughput floor (expected) setting assigned to other workloads. If the QoS floors set on certain workloads use the majority of the bandwidth to guarantee the promised throughput, other workloads will be throttled and see more latency.
- cloud: Represents the software component in the cluster involved with I/O processing between the cluster and the cloud tier on which user data is stored. If the cloud latency component is in contention, it means that a large amount of reads from volumes that are hosted on the cloud tier are impacting the latency of one or more workloads.
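The sketch below is illustrative only (not Harvest code): given per-component values of this metric for a single workload, it reports which subsystem contributes the largest share of the observed latency. The component names come from the list above; the numeric values are made up.

```python
# Hypothetical per-component latency values (microseconds) for one workload.
component_latency_us = {
    "frontend": 120.0, "backend": 480.0, "cluster": 35.0, "cp": 10.0,
    "disk": 900.0, "network": 60.0, "nvlog": 25.0, "suspend": 0.0,
    "throttle": 0.0, "qos_min": 0.0, "cloud": 0.0,
}

total = sum(component_latency_us.values())
worst, value = max(component_latency_us.items(), key=lambda kv: kv[1])
print(f"total {total:.0f} us; dominant component: {worst} ({value / total:.0%})")
```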
    +

    smb2_tree_connect_ops

    +

    Number of SMB2_COM_TREE_CONNECT operations

@@ -50586,20 +55351,20 @@
- qos_detail_resource_latency (Unit: microseconds, Type: average, Base: ops): REST conf/restperf/9.12.0/workload_detail.yaml; ZAPI perf-object-get-instances workload_detail, Harvest generated (Unit: microseconds, Type: average, Base: ops), conf/zapiperf/9.12.0/workload_detail.yaml
+ smb2_tree_connect_ops: REST api/cluster/counter/tables/smb2 tree_connect_ops (Unit: per_sec, Type: rate), conf/restperf/9.14.1/smb2.yaml; ZAPI perf-object-get-instances smb2 tree_connect_ops (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    qos_detail_service_time_latency

    -

This refers to the average service time for workloads within the subsystems of Data ONTAP. These subsystems are the various modules or components within the system that could contribute to delays or latency during data or task processing. This latency is the processing time within the subsystem.

    +

    smb2_write_latency

    +

    Average latency for SMB2_COM_WRITE operations

@@ -50612,20 +55377,20 @@
- qos_detail_service_time_latency: REST api/cluster/counter/tables/qos_detail, Harvest generated (Unit: microseconds, Type: average, Base: ops), conf/restperf/9.12.0/workload_detail.yaml; ZAPI perf-object-get-instances workload_detail, Harvest generated (Unit: microseconds, Type: average, Base: ops), conf/zapiperf/9.12.0/workload_detail.yaml
+ smb2_write_latency: REST api/cluster/counter/tables/smb2 write_latency (Unit: microsec, Type: average, Base: write_ops), conf/restperf/9.14.1/smb2.yaml; ZAPI perf-object-get-instances smb2 write_latency (Unit: microsec, Type: average, Base: write_latency_base), conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    qos_latency

    -

    This is the average response time for requests that were initiated by the workload.

    +

    smb2_write_ops

    +

    Number of SMB2_COM_WRITE operations

@@ -50638,20 +55403,20 @@
- qos_latency: REST api/cluster/counter/tables/qos_volume latency (Unit: microsec, Type: average, Base: ops), conf/restperf/9.12.0/workload_volume.yaml; ZAPI perf-object-get-instances workload_volume latency (Unit: microsec, Type: average,no-zero-values, Base: ops), conf/zapiperf/cdot/9.8.0/workload_volume.yaml
+ smb2_write_ops: REST api/cluster/counter/tables/smb2 write_ops (Unit: per_sec, Type: rate), conf/restperf/9.14.1/smb2.yaml; ZAPI perf-object-get-instances smb2 write_ops (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/smb2.yaml
    -

    qos_ops

    -

    This field is the workload's rate of operations that completed during the measurement interval; measured per second.

    +

    snapmirror_break_failed_count

    +

    The number of failed SnapMirror break operations for the relationship

@@ -50664,20 +55429,20 @@
- qos_ops (Unit: per_sec, Type: rate): REST conf/restperf/9.12.0/workload_volume.yaml; ZAPI perf-object-get-instances workload_volume ops (Unit: per_sec, Type: rate,no-zero-values), conf/zapiperf/cdot/9.8.0/workload_volume.yaml
+ snapmirror_break_failed_count: REST api/private/cli/snapmirror break_failed_count, conf/rest/9.12.0/snapmirror.yaml; ZAPI snapmirror-get-iter snapmirror-info.break-failed-count, conf/zapi/cdot/9.8.0/snapmirror.yaml
    -

    qos_other_ops

    -

    This is the rate of this workload's other operations that completed during the measurement interval.

    +

    snapmirror_break_successful_count

    +

    The number of successful SnapMirror break operations for the relationship

@@ -50690,20 +55455,20 @@
- qos_other_ops: REST api/cluster/counter/tables/qos other_ops (Unit: per_sec, Type: rate), conf/restperf/9.12.0/workload.yaml; ZAPI perf-object-get-instances workload_volume other_ops (Unit: per_sec, Type: rate,no-zero-values), conf/zapiperf/cdot/9.8.0/workload_volume.yaml
+ snapmirror_break_successful_count: REST api/private/cli/snapmirror break_successful_count, conf/rest/9.12.0/snapmirror.yaml; ZAPI snapmirror-get-iter snapmirror-info.break-successful-count, conf/zapi/cdot/9.8.0/snapmirror.yaml
    -

    qos_read_data

    -

    This is the amount of data read per second from the filer by the workload.

    +

    snapmirror_lag_time

    +

    Amount of time since the last snapmirror transfer in seconds
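Because lag_time is reported in seconds, a common use is to compare it against a recovery-point objective. The following is a hypothetical helper, not part of Harvest; the relationship names, threshold, and lag values are made up for illustration.

```python
# Hypothetical helper: flag SnapMirror relationships whose lag_time
# (seconds since the last transfer) exceeds an allowed RPO threshold.

def stale_relationships(lag_by_relationship: dict[str, int],
                        rpo_seconds: int = 4 * 3600) -> list[str]:
    """Return relationship names whose lag exceeds the allowed RPO."""
    return [name for name, lag in lag_by_relationship.items() if lag > rpo_seconds]

# Example with made-up lag values (seconds):
print(stale_relationships({"svm1:vol1": 1_800, "svm1:vol2": 21_600}))  # ['svm1:vol2']
```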

@@ -50716,20 +55481,20 @@
- qos_read_data: REST api/cluster/counter/tables/qos_volume read_data (Unit: b_per_sec, Type: rate), conf/restperf/9.12.0/workload_volume.yaml; ZAPI perf-object-get-instances workload_volume read_data (Unit: b_per_sec, Type: rate,no-zero-values), conf/zapiperf/cdot/9.8.0/workload_volume.yaml
+ snapmirror_lag_time: REST api/private/cli/snapmirror lag_time, conf/rest/9.12.0/snapmirror.yaml; ZAPI snapmirror-get-iter snapmirror-info.lag-time, conf/zapi/cdot/9.8.0/snapmirror.yaml
    -

    qos_read_io_type

    -

    This is the percentage of read requests served from various components (such as buffer cache, ext_cache, disk, etc.).

    +

    snapmirror_last_transfer_duration

    +

    Duration of the last SnapMirror transfer in seconds

@@ -50742,20 +55507,20 @@
- qos_read_io_type (Unit: percent, Type: percent, Base: read_io_type_base): REST conf/restperf/9.12.0/workload_volume.yaml; ZAPI perf-object-get-instances workload_volume read_io_type (Unit: percent, Type: percent, Base: read_io_type_base), conf/zapiperf/cdot/9.8.0/workload_volume.yaml
+ snapmirror_last_transfer_duration: REST api/private/cli/snapmirror last_transfer_duration, conf/rest/9.12.0/snapmirror.yaml; ZAPI snapmirror-get-iter snapmirror-info.last-transfer-duration, conf/zapi/cdot/9.8.0/snapmirror.yaml
    -

    qos_read_latency

    -

    This is the average response time for read requests that were initiated by the workload.

    +

    snapmirror_last_transfer_end_timestamp

    +

    The Timestamp of the end of the last transfer

@@ -50768,20 +55533,20 @@
- qos_read_latency (Unit: microsec, Type: average, Base: read_ops): REST conf/restperf/9.12.0/workload_volume.yaml; ZAPI perf-object-get-instances workload_volume read_latency (Unit: microsec, Type: average,no-zero-values, Base: read_ops), conf/zapiperf/cdot/9.8.0/workload_volume.yaml
+ snapmirror_last_transfer_end_timestamp: REST api/private/cli/snapmirror last_transfer_end_timestamp, conf/rest/9.12.0/snapmirror.yaml; ZAPI snapmirror-get-iter snapmirror-info.last-transfer-end-timestamp, conf/zapi/cdot/9.8.0/snapmirror.yaml
    -

    qos_read_ops

    -

    This is the rate of this workload's read operations that completed during the measurement interval.

    +

    snapmirror_last_transfer_size

    +

    Size in kilobytes (1024 bytes) of the last transfer

@@ -50794,20 +55559,20 @@
- qos_read_ops: REST api/cluster/counter/tables/qos_volume read_ops (Unit: per_sec, Type: rate), conf/restperf/9.12.0/workload_volume.yaml; ZAPI perf-object-get-instances workload_volume read_ops (Unit: per_sec, Type: rate,no-zero-values), conf/zapiperf/cdot/9.8.0/workload_volume.yaml
+ snapmirror_last_transfer_size: REST api/private/cli/snapmirror last_transfer_size, conf/rest/9.12.0/snapmirror.yaml; ZAPI snapmirror-get-iter snapmirror-info.last-transfer-size, conf/zapi/cdot/9.8.0/snapmirror.yaml
    -

    qos_sequential_reads

    -

    This is the percentage of reads, performed on behalf of the workload, that were sequential.

    +

    snapmirror_newest_snapshot_timestamp

    +

    The timestamp of the newest Snapshot copy on the destination volume

@@ -50820,20 +55585,20 @@
- qos_sequential_reads (Unit: percent, Type: percent, Base: sequential_reads_base): REST conf/restperf/9.12.0/workload_volume.yaml; ZAPI perf-object-get-instances workload_volume sequential_reads (Unit: percent, Type: percent,no-zero-values, Base: sequential_reads_base), conf/zapiperf/cdot/9.8.0/workload_volume.yaml
+ snapmirror_newest_snapshot_timestamp: REST api/private/cli/snapmirror newest_snapshot_timestamp, conf/rest/9.12.0/snapmirror.yaml; ZAPI snapmirror-get-iter snapmirror-info.newest-snapshot-timestamp, conf/zapi/cdot/9.8.0/snapmirror.yaml
    -

    qos_sequential_writes

    -

    This is the percentage of writes, performed on behalf of the workload, that were sequential. This counter is only available on platforms with more than 4GB of NVRAM.

    +

    snapmirror_resync_failed_count

    +

    The number of failed SnapMirror resync operations for the relationship

@@ -50846,20 +55611,20 @@
- qos_sequential_writes (Unit: percent, Type: percent, Base: sequential_writes_base): REST conf/restperf/9.12.0/workload_volume.yaml; ZAPI perf-object-get-instances workload_volume sequential_writes (Unit: percent, Type: percent,no-zero-values, Base: sequential_writes_base), conf/zapiperf/cdot/9.8.0/workload_volume.yaml
+ snapmirror_resync_failed_count: REST api/private/cli/snapmirror resync_failed_count, conf/rest/9.12.0/snapmirror.yaml; ZAPI snapmirror-get-iter snapmirror-info.resync-failed-count, conf/zapi/cdot/9.8.0/snapmirror.yaml
    -

    qos_total_data

    -

    This is the total amount of data read/written per second from/to the filer by the workload.

    +

    snapmirror_resync_successful_count

    +

    The number of successful SnapMirror resync operations for the relationship

@@ -50872,20 +55637,20 @@
- qos_total_data (Unit: b_per_sec, Type: rate): REST conf/restperf/9.12.0/workload_volume.yaml; ZAPI perf-object-get-instances workload_volume total_data (Unit: b_per_sec, Type: rate,no-zero-values), conf/zapiperf/cdot/9.8.0/workload_volume.yaml
+ snapmirror_resync_successful_count: REST api/private/cli/snapmirror resync_successful_count, conf/rest/9.12.0/snapmirror.yaml; ZAPI snapmirror-get-iter snapmirror-info.resync-successful-count, conf/zapi/cdot/9.8.0/snapmirror.yaml
    -

    qos_write_data

    -

    This is the amount of data written per second to the filer by the workload.

    +

    snapmirror_total_transfer_bytes

    +

    Cumulative bytes transferred for the relationship

@@ -50898,20 +55663,20 @@
- qos_write_data (Unit: b_per_sec, Type: rate): REST conf/restperf/9.12.0/workload_volume.yaml; ZAPI perf-object-get-instances workload_volume write_data (Unit: b_per_sec, Type: rate,no-zero-values), conf/zapiperf/cdot/9.8.0/workload_volume.yaml
+ snapmirror_total_transfer_bytes: REST api/private/cli/snapmirror total_transfer_bytes, conf/rest/9.12.0/snapmirror.yaml; ZAPI snapmirror-get-iter snapmirror-info.total-transfer-bytes, conf/zapi/cdot/9.8.0/snapmirror.yaml
    -

    qos_write_latency

    -

    This is the average response time for write requests that were initiated by the workload.

    +

    snapmirror_total_transfer_time_secs

    +

    Cumulative total transfer time in seconds for the relationship

@@ -50924,20 +55689,20 @@
- qos_write_latency (Unit: microsec, Type: average, Base: write_ops): REST conf/restperf/9.12.0/workload_volume.yaml; ZAPI perf-object-get-instances workload_volume write_latency (Unit: microsec, Type: average,no-zero-values, Base: write_ops), conf/zapiperf/cdot/9.8.0/workload_volume.yaml
+ snapmirror_total_transfer_time_secs: REST api/private/cli/snapmirror total_transfer_time_secs, conf/rest/9.12.0/snapmirror.yaml; ZAPI snapmirror-get-iter snapmirror-info.total-transfer-time-secs, conf/zapi/cdot/9.8.0/snapmirror.yaml
    -

    qos_write_ops

    -

    This is the workload's write operations that completed during the measurement interval; measured per second.

    +

    snapmirror_update_failed_count

    +

The number of failed SnapMirror update operations for the relationship

@@ -50950,20 +55715,20 @@
- qos_write_ops: REST api/cluster/counter/tables/qos_volume write_ops (Unit: per_sec, Type: rate), conf/restperf/9.12.0/workload_volume.yaml; ZAPI perf-object-get-instances workload_volume write_ops (Unit: per_sec, Type: rate,no-zero-values), conf/zapiperf/cdot/9.8.0/workload_volume.yaml
+ snapmirror_update_failed_count: REST api/private/cli/snapmirror update_failed_count, conf/rest/9.12.0/snapmirror.yaml; ZAPI snapmirror-get-iter snapmirror-info.update-failed-count, conf/zapi/cdot/9.8.0/snapmirror.yaml
    -

    qtree_cifs_ops

    -

    Number of CIFS operations per second to the qtree

    +

    snapmirror_update_successful_count

    +

    Number of Successful Updates

@@ -50976,20 +55741,20 @@
- qtree_cifs_ops (Unit: per_sec, Type: rate): REST conf/restperf/9.12.0/qtree.yaml; ZAPI perf-object-get-instances qtree cifs_ops (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/qtree.yaml
+ snapmirror_update_successful_count: REST api/private/cli/snapmirror update_successful_count, conf/rest/9.12.0/snapmirror.yaml; ZAPI snapmirror-get-iter snapmirror-info.update-successful-count, conf/zapi/cdot/9.8.0/snapmirror.yaml
    -

    qtree_id

    -

    The identifier for the qtree, unique within the qtree's volume.

    +

    snapshot_policy_total_schedules

    +

    Total Number of Schedules in this Policy

@@ -51002,14 +55767,20 @@

- qtree_id

qtree_internal_ops

-

Number of internal operations generated by activities such as snapmirror and backup per second to the qtree

+

svm_cifs_connections

+

Number of connections

@@ -51022,20 +55793,20 @@
- qtree_internal_ops (Unit: per_sec, Type: rate): REST conf/restperf/9.12.0/qtree.yaml; ZAPI perf-object-get-instances qtree internal_ops (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/qtree.yaml
+ svm_cifs_connections: REST api/cluster/counter/tables/svm_cifs connections (Unit: none, Type: raw), conf/restperf/9.12.0/cifs_vserver.yaml; ZAPI perf-object-get-instances cifs:vserver connections (Unit: none, Type: raw), conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml
    -

    qtree_nfs_ops

    -

    Number of NFS operations per second to the qtree

    +

    svm_cifs_established_sessions

    +

    Number of established SMB and SMB2 sessions

@@ -51048,20 +55819,20 @@
- qtree_nfs_ops: REST api/cluster/counter/tables/qtree nfs_ops (Unit: per_sec, Type: rate), conf/restperf/9.12.0/qtree.yaml; ZAPI perf-object-get-instances qtree nfs_ops (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/qtree.yaml
+ svm_cifs_established_sessions: REST api/cluster/counter/tables/svm_cifs established_sessions (Unit: none, Type: raw), conf/restperf/9.12.0/cifs_vserver.yaml; ZAPI perf-object-get-instances cifs:vserver established_sessions (Unit: none, Type: raw), conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml
    -

    qtree_total_ops

    -

    Summation of NFS ops, CIFS ops, CSS ops and internal ops

    +

    svm_cifs_latency

    +

    Average latency for CIFS operations

@@ -51074,20 +55845,20 @@
- qtree_total_ops (Unit: per_sec, Type: rate): REST conf/restperf/9.12.0/qtree.yaml; ZAPI perf-object-get-instances qtree total_ops (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/qtree.yaml
+ svm_cifs_latency: REST api/cluster/counter/tables/svm_cifs latency (Unit: microsec, Type: average, Base: latency_base), conf/restperf/9.12.0/cifs_vserver.yaml; ZAPI perf-object-get-instances cifs:vserver cifs_latency (Unit: microsec, Type: average, Base: cifs_latency_base), conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml
    -

    quota_disk_limit

    -

    Maximum amount of disk space, in kilobytes, allowed for the quota target (hard disk space limit). The value is -1 if the limit is unlimited.
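The -1 convention above matters when deriving percentages from the quota_* metrics: a limit of -1 means "unlimited", so no meaningful percentage exists. The sketch below is illustrative only (not Harvest code) and uses made-up values.

```python
# Sketch of the percentage convention for quota limits: -1 means unlimited,
# so no percentage should be derived in that case. Values are illustrative.

def pct_of_limit(used_kb: int, limit_kb: int) -> float | None:
    """Return used/limit as a percentage, or None when the limit is unlimited."""
    if limit_kb in (-1, 0):
        return None
    return 100.0 * used_kb / limit_kb

print(pct_of_limit(512_000, 1_048_576))  # ~48.8
print(pct_of_limit(512_000, -1))         # None (unlimited)
```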

    +

    svm_cifs_op_count

    +

    Array of select CIFS operation counts

@@ -51100,20 +55871,20 @@
- quota_disk_limit: ZAPI quota-report-iter disk-limit, conf/zapi/cdot/9.8.0/qtree.yaml
+ svm_cifs_op_count: REST (Unit: none, Type: rate), conf/restperf/9.12.0/cifs_vserver.yaml; ZAPI perf-object-get-instances cifs:vserver cifs_op_count (Unit: none, Type: rate), conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml
    -

    quota_disk_used

    -

    Current amount of disk space, in kilobytes, used by the quota target.

    +

    svm_cifs_open_files

    +

    Number of open files over SMB and SMB2

@@ -51126,20 +55897,20 @@
- quota_disk_used: ZAPI quota-report-iter disk-used, conf/zapi/cdot/9.8.0/qtree.yaml
+ svm_cifs_open_files: REST (Unit: none, Type: raw), conf/restperf/9.12.0/cifs_vserver.yaml; ZAPI perf-object-get-instances cifs:vserver open_files (Unit: none, Type: raw), conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml
    -

    quota_disk_used_pct_disk_limit

    -

    Current disk space used expressed as a percentage of hard disk limit.

    +

    svm_cifs_ops

    +

    Total number of CIFS operations

@@ -51152,20 +55923,20 @@
- quota_disk_used_pct_disk_limit: ZAPI quota-report-iter disk-used-pct-disk-limit, conf/zapi/cdot/9.8.0/qtree.yaml
+ svm_cifs_ops: REST (Unit: per_sec, Type: rate), conf/restperf/9.12.0/cifs_vserver.yaml; ZAPI perf-object-get-instances cifs:vserver cifs_ops (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml
    -

    quota_disk_used_pct_soft_disk_limit

    -

    Current disk space used expressed as a percentage of soft disk limit.

    +

    svm_cifs_read_latency

    +

    Average latency for CIFS read operations

@@ -51178,20 +55949,20 @@
- quota_disk_used_pct_soft_disk_limit: REST api/storage/quota/reports space.used.soft_limit_percent, conf/rest/9.12.0/qtree.yaml; ZAPI quota-report-iter disk-used-pct-soft-disk-limit, conf/zapi/cdot/9.8.0/qtree.yaml
+ svm_cifs_read_latency: REST api/cluster/counter/tables/svm_cifs average_read_latency (Unit: microsec, Type: average, Base: total_read_ops), conf/restperf/9.12.0/cifs_vserver.yaml; ZAPI perf-object-get-instances cifs:vserver cifs_read_latency (Unit: microsec, Type: average, Base: cifs_read_ops), conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml
    -

    quota_disk_used_pct_threshold

    -

    Current disk space used expressed as a percentage of threshold.

    + +

    svm_cifs_read_ops

    +

    Total number of CIFS read operations

@@ -51203,15 +55974,21 @@
- quota_disk_used_pct_threshold: ZAPI quota-report-iter disk-used-pct-threshold, conf/zapi/cdot/9.8.0/qtree.yaml
+ svm_cifs_read_ops: REST (Unit: per_sec, Type: rate), conf/restperf/9.12.0/cifs_vserver.yaml; ZAPI perf-object-get-instances cifs:vserver cifs_read_ops (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml
    -

    quota_file_limit

    -

    Maximum number of files allowed for the quota target (hard files limit). The value is -1 if the limit is unlimited.

    +

    svm_cifs_signed_sessions

    +

    Number of signed SMB and SMB2 sessions.

@@ -51224,20 +56001,20 @@
- quota_file_limit: ZAPI quota-report-iter file-limit, conf/zapi/cdot/9.8.0/qtree.yaml
+ svm_cifs_signed_sessions: REST (Unit: none, Type: raw), conf/restperf/9.12.0/cifs_vserver.yaml; ZAPI perf-object-get-instances cifs:vserver signed_sessions (Unit: none, Type: raw), conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml
    -

    quota_files_used

    -

    Current number of files used by the quota target.

    +

    svm_cifs_write_latency

    +

    Average latency for CIFS write operations

@@ -51250,20 +56027,20 @@
- quota_files_used: ZAPI quota-report-iter files-used, conf/zapi/cdot/9.8.0/qtree.yaml
+ svm_cifs_write_latency: REST (Unit: microsec, Type: average, Base: total_write_ops), conf/restperf/9.12.0/cifs_vserver.yaml; ZAPI perf-object-get-instances cifs:vserver cifs_write_latency (Unit: microsec, Type: average, Base: cifs_write_ops), conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml
    -

    quota_files_used_pct_file_limit

    -

    Current number of files used expressed as a percentage of hard file limit.

    +

    svm_cifs_write_ops

    +

    Total number of CIFS write operations

@@ -51276,20 +56053,20 @@
- quota_files_used_pct_file_limit: REST api/storage/quota/reports files.used.hard_limit_percent, conf/rest/9.12.0/qtree.yaml; ZAPI quota-report-iter files-used-pct-file-limit, conf/zapi/cdot/9.8.0/qtree.yaml
+ svm_cifs_write_ops: REST api/cluster/counter/tables/svm_cifs total_write_ops (Unit: per_sec, Type: rate), conf/restperf/9.12.0/cifs_vserver.yaml; ZAPI perf-object-get-instances cifs:vserver cifs_write_ops (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml
    -

    quota_files_used_pct_soft_file_limit

    -

    Current number of files used expressed as a percentage of soft file limit.

    +

    svm_nfs_access_avg_latency

    +

    Average latency of Access procedure requests. The counter keeps track of the average response time of Access requests.
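As the rows below show, this metric is reported separately per NFS version (v3, v4, v4.1, v4.2). The sketch that follows is illustrative only (not Harvest code): it combines per-version averages into one overall figure by weighting each version's latency with its access_total op count; all numeric values are made up.

```python
# Illustrative aggregation: weight each NFS version's average access latency
# by its access_total op count to get a single overall average.

versions = {
    # version: (avg_latency_us, access_total_ops) -- made-up sample values
    "nfsv3":   (250.0, 12_000),
    "nfsv4":   (310.0,  3_000),
    "nfsv4_1": (280.0,  9_000),
    "nfsv4_2": (295.0,    500),
}

total_ops = sum(ops for _, ops in versions.values())
weighted = sum(lat * ops for lat, ops in versions.values()) / total_ops
print(f"overall access latency ~= {weighted:.1f} microsec over {total_ops} ops")
```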

@@ -51302,20 +56079,56 @@
- quota_files_used_pct_soft_file_limit: REST api/storage/quota/reports files.used.soft_limit_percent, conf/rest/9.12.0/qtree.yaml; ZAPI quota-report-iter files-used-pct-soft-file-limit, conf/zapi/cdot/9.8.0/qtree.yaml
+ svm_nfs_access_avg_latency: REST api/cluster/counter/tables/svm_nfs_v3|v4|v41|v42 access.average_latency (Unit: microsec, Type: average, Base: access.total), conf/restperf/9.12.0/nfsv3.yaml|nfsv4.yaml|nfsv4_1.yaml|nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv3|nfsv4|nfsv4_1|nfsv4_2 access_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: access_total), conf/zapiperf/cdot/9.8.0/nfsv3.yaml|nfsv4.yaml|nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    quota_soft_disk_limit

    -

Soft disk space limit, in kilobytes, for the quota target. The value is -1 if the limit is unlimited.

    +

    svm_nfs_access_total

    +

    Total number of Access procedure requests. It is the total number of access success and access error requests.

@@ -51328,20 +56141,56 @@
- quota_soft_disk_limit: ZAPI quota-report-iter soft-disk-limit, conf/zapi/cdot/9.8.0/qtree.yaml
+ svm_nfs_access_total: REST api/cluster/counter/tables/svm_nfs_v3|v4|v41|v42 access.total (Unit: none, Type: rate), conf/restperf/9.12.0/nfsv3.yaml|nfsv4.yaml|nfsv4_1.yaml|nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv3|nfsv4|nfsv4_1|nfsv4_2 access_total (Unit: none, Type: rate), conf/zapiperf/cdot/9.8.0/nfsv3.yaml|nfsv4.yaml|nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    quota_soft_file_limit

    -

    Soft file limit, in number of files, for the quota target. The value is -1 if the limit is unlimited.

    +

    svm_nfs_backchannel_ctl_avg_latency

    +

    Average latency of BACKCHANNEL_CTL operations.

@@ -51354,20 +56203,32 @@
- quota_soft_file_limit: ZAPI quota-report-iter soft-file-limit, conf/zapi/cdot/9.8.0/qtree.yaml
+ svm_nfs_backchannel_ctl_avg_latency: REST api/cluster/counter/tables/svm_nfs_v41|v42 backchannel_ctl.average_latency (Unit: microsec, Type: average, Base: backchannel_ctl.total), conf/restperf/9.12.0/nfsv4_1.yaml|nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv4_1|nfsv4_2 backchannel_ctl_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: backchannel_ctl_total), conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    quota_threshold

    -

    Disk space threshold, in kilobytes, for the quota target. The value is -1 if the limit is unlimited.

    +

    svm_nfs_backchannel_ctl_total

    +

    Total number of BACKCHANNEL_CTL operations.

@@ -51379,21 +56240,33 @@
- quota_threshold: REST NA, Harvest generated, conf/rest/9.12.0/qtree.yaml
+ svm_nfs_backchannel_ctl_total: REST api/cluster/counter/tables/svm_nfs_v41|v42 backchannel_ctl.total (Unit: none, Type: rate), conf/restperf/9.12.0/nfsv4_1.yaml|nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv4_1|nfsv4_2 backchannel_ctl_total (Unit: none, Type: rate), conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    raid_disk_busy

    -

    The utilization percent of the disk. raid_disk_busy is disk_busy aggregated by raid.

    +

    svm_nfs_bind_conn_to_session_avg_latency

    +

    Average latency of BIND_CONN_TO_SESSION operations.

@@ -51406,20 +56279,32 @@
- raid_disk_busy (Unit: percent, Type: percent, Base: base_for_disk_busy): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent disk_busy (Unit: percent, Type: percent, Base: base_for_disk_busy), conf/zapiperf/cdot/9.8.0/disk.yaml
+ svm_nfs_bind_conn_to_session_avg_latency: REST api/cluster/counter/tables/svm_nfs_v41 bind_connections_to_session.average_latency (Unit: microsec, Type: average, Base: bind_connections_to_session.total), conf/restperf/9.12.0/nfsv4_1.yaml; REST api/cluster/counter/tables/svm_nfs_v42 bind_conn_to_session.average_latency (Unit: microsec, Type: average, Base: bind_conn_to_session.total), conf/restperf/9.12.0/nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv4_1|nfsv4_2 bind_conn_to_session_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: bind_conn_to_session_total), conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    raid_disk_capacity

    -

    Disk capacity in MB. raid_disk_capacity is disk_capacity aggregated by raid.

    +

    svm_nfs_bind_conn_to_session_total

    +

    Total number of BIND_CONN_TO_SESSION operations.

@@ -51432,20 +56317,32 @@
- raid_disk_capacity (Unit: mb, Type: raw): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent disk_capacity (Unit: mb, Type: raw), conf/zapiperf/cdot/9.8.0/disk.yaml
+ svm_nfs_bind_conn_to_session_total: REST api/cluster/counter/tables/svm_nfs_v41 bind_connections_to_session.total (Unit: none, Type: rate), conf/restperf/9.12.0/nfsv4_1.yaml; REST api/cluster/counter/tables/svm_nfs_v42 bind_conn_to_session.total (Unit: none, Type: rate), conf/restperf/9.12.0/nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv4_1|nfsv4_2 bind_conn_to_session_total (Unit: none, Type: rate), conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    raid_disk_cp_read_chain

    -

    Average number of blocks transferred in each consistency point read operation during a CP. raid_disk_cp_read_chain is disk_cp_read_chain aggregated by raid.

    +

    svm_nfs_close_avg_latency

    +

    Average latency of CLOSE procedures

@@ -51458,20 +56355,44 @@
- raid_disk_cp_read_chain: REST api/cluster/counter/tables/disk:constituent cp_read_chain (Unit: none, Type: average, Base: cp_read_count), conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent cp_read_chain (Unit: none, Type: average, Base: cp_reads), conf/zapiperf/cdot/9.8.0/disk.yaml
+ svm_nfs_close_avg_latency: REST api/cluster/counter/tables/svm_nfs_v4|v41|v42 close.average_latency (Unit: microsec, Type: average, Base: close.total), conf/restperf/9.12.0/nfsv4.yaml|nfsv4_1.yaml|nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv4|nfsv4_1|nfsv4_2 close_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: close_total), conf/zapiperf/cdot/9.8.0/nfsv4.yaml|nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    raid_disk_cp_read_latency

    -

    Average latency per block in microseconds for consistency point read operations. raid_disk_cp_read_latency is disk_cp_read_latency aggregated by raid.

    +

    svm_nfs_close_total

    +

    Total number of CLOSE procedures

@@ -51484,20 +56405,44 @@
- raid_disk_cp_read_latency (Unit: microsec, Type: average, Base: cp_read_blocks): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent cp_read_latency (Unit: microsec, Type: average, Base: cp_read_blocks), conf/zapiperf/cdot/9.8.0/disk.yaml
+ svm_nfs_close_total: REST api/cluster/counter/tables/svm_nfs_v4|v41|v42 close.total (Unit: none, Type: rate), conf/restperf/9.12.0/nfsv4.yaml|nfsv4_1.yaml|nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv4|nfsv4_1|nfsv4_2 close_total (Unit: none, Type: rate), conf/zapiperf/cdot/9.8.0/nfsv4.yaml|nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    raid_disk_cp_reads

    -

    Number of disk read operations initiated each second for consistency point processing. raid_disk_cp_reads is disk_cp_reads aggregated by raid.

    +

    svm_nfs_commit_avg_latency

    +

    Average latency of Commit procedure requests. The counter keeps track of the average response time of Commit requests.

@@ -51510,20 +56455,56 @@
- raid_disk_cp_reads (Unit: per_sec, Type: rate): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent cp_reads (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/disk.yaml
+ svm_nfs_commit_avg_latency: REST api/cluster/counter/tables/svm_nfs_v3|v4|v41|v42 commit.average_latency (Unit: microsec, Type: average, Base: commit.total), conf/restperf/9.12.0/nfsv3.yaml|nfsv4.yaml|nfsv4_1.yaml|nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv3|nfsv4|nfsv4_1|nfsv4_2 commit_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: commit_total), conf/zapiperf/cdot/9.8.0/nfsv3.yaml|nfsv4.yaml|nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    raid_disk_io_pending

    -

    Average number of I/Os issued to the disk for which we have not yet received the response. raid_disk_io_pending is disk_io_pending aggregated by raid.

    +

    svm_nfs_commit_total

    +

    Total number of Commit procedure requests. It is the total number of Commit success and Commit error requests.

@@ -51536,20 +56517,56 @@
- raid_disk_io_pending (Unit: none, Type: average, Base: base_for_disk_busy): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent io_pending (Unit: none, Type: average, Base: base_for_disk_busy), conf/zapiperf/cdot/9.8.0/disk.yaml
+ svm_nfs_commit_total: REST api/cluster/counter/tables/svm_nfs_v3|v4|v41|v42 commit.total (Unit: none, Type: rate), conf/restperf/9.12.0/nfsv3.yaml|nfsv4.yaml|nfsv4_1.yaml|nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv3|nfsv4|nfsv4_1|nfsv4_2 commit_total (Unit: none, Type: rate), conf/zapiperf/cdot/9.8.0/nfsv3.yaml|nfsv4.yaml|nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    raid_disk_io_queued

    -

    Number of I/Os queued to the disk but not yet issued. raid_disk_io_queued is disk_io_queued aggregated by raid.

    +

    svm_nfs_create_avg_latency

    +

    Average latency of Create procedure requests. The counter keeps track of the average response time of Create requests.

@@ -51562,20 +56579,56 @@
- raid_disk_io_queued (Unit: none, Type: average, Base: base_for_disk_busy): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent io_queued (Unit: none, Type: average, Base: base_for_disk_busy), conf/zapiperf/cdot/9.8.0/disk.yaml
+ svm_nfs_create_avg_latency: REST api/cluster/counter/tables/svm_nfs_v3|v4|v41|v42 create.average_latency (Unit: microsec, Type: average, Base: create.total), conf/restperf/9.12.0/nfsv3.yaml|nfsv4.yaml|nfsv4_1.yaml|nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv3|nfsv4|nfsv4_1|nfsv4_2 create_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: create_total), conf/zapiperf/cdot/9.8.0/nfsv3.yaml|nfsv4.yaml|nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    raid_disk_total_data

    -

    Total throughput for user operations per second. raid_disk_total_data is disk_total_data aggregated by raid.

    +

    svm_nfs_create_session_avg_latency

    +

    Average latency of CREATE_SESSION operations.

@@ -51588,20 +56641,32 @@
- raid_disk_total_data (Unit: b_per_sec, Type: rate): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent total_data (Unit: b_per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/disk.yaml
+ svm_nfs_create_session_avg_latency: REST api/cluster/counter/tables/svm_nfs_v41|v42 create_session.average_latency (Unit: microsec, Type: average, Base: create_session.total), conf/restperf/9.12.0/nfsv4_1.yaml|nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv4_1|nfsv4_2 create_session_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: create_session_total), conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    raid_disk_total_transfers

    -

    Total number of disk operations involving data transfer initiated per second. raid_disk_total_transfers is disk_total_transfers aggregated by raid.

    +

    svm_nfs_create_session_total

    +

    Total number of CREATE_SESSION operations.

@@ -51614,20 +56679,32 @@
- raid_disk_total_transfers (Unit: per_sec, Type: rate): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent total_transfers (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/disk.yaml
+ svm_nfs_create_session_total: REST api/cluster/counter/tables/svm_nfs_v41|v42 create_session.total (Unit: none, Type: rate), conf/restperf/9.12.0/nfsv4_1.yaml|nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv4_1|nfsv4_2 create_session_total (Unit: none, Type: rate), conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    raid_disk_user_read_blocks

    -

    Number of blocks transferred for user read operations per second. raid_disk_user_read_blocks is disk_user_read_blocks aggregated by raid.

    +

    svm_nfs_create_total

    +

Total number of Create procedure requests. It is the total number of create success and create error requests.

@@ -51640,72 +56717,56 @@
- raid_disk_user_read_blocks (Unit: per_sec, Type: rate): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent user_read_blocks (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/disk.yaml
- raid_disk_user_read_chain — Average number of blocks transferred in each user read operation. raid_disk_user_read_chain is disk_user_read_chain aggregated by raid. REST api/cluster/counter/tables/disk:constituent user_read_chain (Unit: none, Type: average, Base: user_read_count), conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent user_read_chain (Unit: none, Type: average, Base: user_reads), conf/zapiperf/cdot/9.8.0/disk.yaml
- raid_disk_user_read_latency — Average latency per block in microseconds for user read operations. raid_disk_user_read_latency is disk_user_read_latency aggregated by raid. REST api/cluster/counter/tables/disk:constituent user_read_latency (Unit: microsec, Type: average, Base: user_read_block_count), conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent user_read_latency (Unit: microsec, Type: average, Base: user_read_blocks), conf/zapiperf/cdot/9.8.0/disk.yaml
+ svm_nfs_create_total: REST api/cluster/counter/tables/svm_nfs_v3|v4|v41|v42 create.total (Unit: none, Type: rate), conf/restperf/9.12.0/nfsv3.yaml|nfsv4.yaml|nfsv4_1.yaml|nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv3|nfsv4|nfsv4_1|nfsv4_2 create_total (Unit: none, Type: rate), conf/zapiperf/cdot/9.8.0/nfsv3.yaml|nfsv4.yaml|nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    raid_disk_user_reads

    -

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. raid_disk_user_reads is disk_user_reads aggregated by raid.

    +

    svm_nfs_delegpurge_avg_latency

    +

    Average latency of DELEGPURGE procedures

@@ -51718,46 +56779,44 @@
- raid_disk_user_reads (Unit: per_sec, Type: rate): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent user_reads (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/disk.yaml
- raid_disk_user_write_blocks — Number of blocks transferred for user write operations per second. raid_disk_user_write_blocks is disk_user_write_blocks aggregated by raid. REST api/cluster/counter/tables/disk:constituent user_write_block_count (Unit: per_sec, Type: rate), conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent user_write_blocks (Unit: per_sec, Type: rate), conf/zapiperf/cdot/9.8.0/disk.yaml
+ svm_nfs_delegpurge_avg_latency: REST api/cluster/counter/tables/svm_nfs_v4|v41|v42 delegpurge.average_latency (Unit: microsec, Type: average, Base: delegpurge.total), conf/restperf/9.12.0/nfsv4.yaml|nfsv4_1.yaml|nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv4|nfsv4_1|nfsv4_2 delegpurge_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: delegpurge_total), conf/zapiperf/cdot/9.8.0/nfsv4.yaml|nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    raid_disk_user_write_chain

    -

    Average number of blocks transferred in each user write operation. raid_disk_user_write_chain is disk_user_write_chain aggregated by raid.

    +

    svm_nfs_delegpurge_total

    +

    Total number of DELEGPURGE procedures

@@ -51770,20 +56829,44 @@
- raid_disk_user_write_chain (Unit: none, Type: average, Base: user_write_count): REST conf/restperf/9.12.0/disk.yaml; ZAPI perf-object-get-instances disk:constituent user_write_chain (Unit: none, Type: average, Base: user_writes), conf/zapiperf/cdot/9.8.0/disk.yaml
+ svm_nfs_delegpurge_total: REST api/cluster/counter/tables/svm_nfs_v4|v41|v42 delegpurge.total (Unit: none, Type: rate), conf/restperf/9.12.0/nfsv4.yaml|nfsv4_1.yaml|nfsv4_2.yaml; ZAPI perf-object-get-instances nfsv4|nfsv4_1|nfsv4_2 delegpurge_total (Unit: none, Type: rate), conf/zapiperf/cdot/9.8.0/nfsv4.yaml|nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -
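The "average"-type counters above are not stored as averages. ONTAP exposes a cumulative latency counter plus the Base counter named in each row (for example delegpurge.total), and the average is the ratio of the two deltas between polls. A minimal sketch of that derivation, with hypothetical sample values; this is not Harvest's implementation:

```python
# Minimal sketch (not Harvest's implementation): how an "average"-type counter
# such as delegpurge.average_latency is derived from its Base counter
# (delegpurge.total) using two consecutive samples of the raw counters.

def average_latency(prev: dict, curr: dict) -> float:
    """Return average latency in microseconds over the sample interval."""
    dlat = curr["delegpurge.average_latency"] - prev["delegpurge.average_latency"]  # accumulated microsec
    dops = curr["delegpurge.total"] - prev["delegpurge.total"]                      # operations completed
    return dlat / dops if dops else 0.0  # no ops in the interval -> report 0

# Hypothetical raw samples taken 60 s apart
prev = {"delegpurge.average_latency": 1_200_000, "delegpurge.total": 4_000}
curr = {"delegpurge.average_latency": 1_500_000, "delegpurge.total": 5_000}
print(f"{average_latency(prev, curr):.1f} microsec")  # 300.0 microsec
```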

raid_disk_user_write_latency

Average latency per block in microseconds for user write operations. raid_disk_user_write_latency is disk_user_write_latency aggregated by raid.

- REST: Unit: microsec, Type: average, Base: user_write_block_count; conf/restperf/9.12.0/disk.yaml
- ZAPI: perf-object-get-instances disk:constituent; user_write_latency (Unit: microsec, Type: average, Base: user_write_blocks); conf/zapiperf/cdot/9.8.0/disk.yaml

svm_nfs_delegreturn_avg_latency

Average latency of DELEGRETURN procedures

- REST: api/cluster/counter/tables/svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; delegreturn.average_latency (Unit: microsec, Type: average, Base: delegreturn.total); conf/restperf/9.12.0/nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4, nfsv4_1, nfsv4_2; delegreturn_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: delegreturn_total); conf/zapiperf/cdot/9.8.0/nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

raid_disk_user_writes

Number of disk write operations initiated each second for storing data or metadata associated with user requests. raid_disk_user_writes is disk_user_writes aggregated by raid.

- REST: Unit: per_sec, Type: rate; conf/restperf/9.12.0/disk.yaml
- ZAPI: perf-object-get-instances disk:constituent; user_writes (Unit: per_sec, Type: rate); conf/zapiperf/cdot/9.8.0/disk.yaml

svm_nfs_delegreturn_total

Total number of DELEGRETURN procedures

- REST: api/cluster/counter/tables/svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; delegreturn.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4, nfsv4_1, nfsv4_2; delegreturn_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

rw_ctx_cifs_giveups

Array of the number of CIFS ops that were given up because they rewound more than a certain threshold, categorized by rewind reason.

- ZAPI: perf-object-get-instances rw_ctx; cifs_giveups (Unit: none, Type: delta); conf/zapiperf/cdot/9.8.0/rwctx.yaml

rw_ctx_cifs_rewinds

Array of the number of rewinds for CIFS ops, categorized by reason.

- ZAPI: perf-object-get-instances rw_ctx; cifs_rewinds (Unit: none, Type: delta); conf/zapiperf/cdot/9.8.0/rwctx.yaml

svm_nfs_destroy_clientid_avg_latency

Average latency of DESTROY_CLIENTID operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; destroy_clientid.average_latency (Unit: microsec, Type: average, Base: destroy_clientid.total); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; destroy_clientid_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: destroy_clientid_total); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

rw_ctx_nfs_giveups

Array of the number of NFS ops that were given up because they rewound more than a certain threshold, categorized by rewind reason.

- ZAPI: perf-object-get-instances rw_ctx; nfs_giveups (Unit: none, Type: delta); conf/zapiperf/cdot/9.8.0/rwctx.yaml

svm_nfs_destroy_clientid_total

Total number of DESTROY_CLIENTID operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; destroy_clientid.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; destroy_clientid_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

rw_ctx_nfs_rewinds

Array of the number of rewinds for NFS ops, categorized by reason.

- ZAPI: perf-object-get-instances rw_ctx; nfs_rewinds (Unit: none, Type: delta); conf/zapiperf/cdot/9.8.0/rwctx.yaml

svm_nfs_destroy_session_avg_latency

Average latency of DESTROY_SESSION operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; destroy_session.average_latency (Unit: microsec, Type: average, Base: destroy_session.total); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; destroy_session_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: destroy_session_total); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

rw_ctx_qos_flowcontrol

The number of times QoS limiting has enabled stream flowcontrol.

- ZAPI: perf-object-get-instances rw_ctx; qos_flowcontrol (Unit: none, Type: delta); conf/zapiperf/cdot/9.8.0/rwctx.yaml

svm_nfs_destroy_session_total

Total number of DESTROY_SESSION operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; destroy_session.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; destroy_session_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

rw_ctx_qos_rewinds

The number of restarts after a rewind because of QoS limiting.

- ZAPI: perf-object-get-instances rw_ctx; qos_rewinds (Unit: none, Type: delta); conf/zapiperf/cdot/9.8.0/rwctx.yaml

svm_nfs_exchange_id_avg_latency

Average latency of EXCHANGE_ID operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; exchange_id.average_latency (Unit: microsec, Type: average, Base: exchange_id.total); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; exchange_id_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: exchange_id_total); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

security_audit_destination_port

The destination port used to forward the message.

- ZAPI: cluster-log-forward-get-iter; cluster-log-forward-info.port; conf/zapi/cdot/9.8.0/security_audit_dest.yaml

svm_nfs_exchange_id_total

Total number of EXCHANGE_ID operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; exchange_id.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; exchange_id_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

security_certificate_expiry_time

- REST: api/private/cli/security/certificate; expiration; conf/rest/9.12.0/security_certificate.yaml
- ZAPI: security-certificate-get-iter; certificate-info.expiration-date; conf/zapi/cdot/9.8.0/security_certificate.yaml

svm_nfs_free_stateid_avg_latency

Average latency of FREE_STATEID operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; free_stateid.average_latency (Unit: microsec, Type: average, Base: free_stateid.total); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; free_stateid_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: free_stateid_total); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

security_ssh_max_instances

Maximum possible simultaneous connections.

svm_nfs_free_stateid_total

Total number of FREE_STATEID operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; free_stateid.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; free_stateid_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

shelf_average_ambient_temperature

Average temperature of all ambient sensors for shelf in Celsius.

- REST: NA; Harvest generated; conf/restperf/9.12.0/disk.yaml
- ZAPI: NA; Harvest generated; conf/zapiperf/cdot/9.8.0/disk.yaml

shelf_average_fan_speed

Average fan speed for shelf in rpm.

- REST: NA; Harvest generated; conf/restperf/9.12.0/disk.yaml
- ZAPI: NA; Harvest generated; conf/zapiperf/cdot/9.8.0/disk.yaml

svm_nfs_fsinfo_avg_latency

Average latency of FSInfo procedure requests. The counter keeps track of the average response time of FSInfo requests.

- REST: api/cluster/counter/tables/svm_nfs_v3; fsinfo.average_latency (Unit: microsec, Type: average, Base: fsinfo.total); conf/restperf/9.12.0/nfsv3.yaml
- ZAPI: perf-object-get-instances nfsv3; fsinfo_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: fsinfo_total); conf/zapiperf/cdot/9.8.0/nfsv3.yaml

shelf_average_temperature

Average temperature of all non-ambient sensors for shelf in Celsius.

- REST: NA; Harvest generated; conf/restperf/9.12.0/disk.yaml
- ZAPI: NA; Harvest generated; conf/zapiperf/cdot/9.8.0/disk.yaml

svm_nfs_fsinfo_total

Total number of FSInfo procedure requests. It is the total number of FSInfo success and FSInfo error requests.

- REST: api/cluster/counter/tables/svm_nfs_v3; fsinfo.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv3.yaml
- ZAPI: perf-object-get-instances nfsv3; fsinfo_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv3.yaml
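The shelf_* metrics above are marked "Harvest generated": they are not read from a single ONTAP counter, but computed by the collector from the shelf's individual sensor readings. A minimal sketch of that kind of aggregation, assuming the metrics are simple min/max/average functions over per-sensor values; the field names and sample readings below are hypothetical:

```python
# Minimal sketch (assumption): "Harvest generated" shelf metrics as simple
# aggregates over a shelf's sensor readings. Values are hypothetical.

ambient_temps_c = [21.0, 22.5, 21.5]       # ambient temperature sensors of one shelf
fan_speeds_rpm = [4200, 4380, 4260, 4320]  # fan speed sensors of the same shelf

shelf_average_ambient_temperature = sum(ambient_temps_c) / len(ambient_temps_c)
shelf_min_fan_speed = min(fan_speeds_rpm)
shelf_max_fan_speed = max(fan_speeds_rpm)

print(shelf_average_ambient_temperature, shelf_min_fan_speed, shelf_max_fan_speed)
```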

shelf_disk_count

Disk count in a shelf.

- ZAPI: storage-shelf-info-get-iter; storage-shelf-info.disk-count; conf/zapi/cdot/9.8.0/shelf.yaml

svm_nfs_fsstat_avg_latency

Average latency of FSStat procedure requests. The counter keeps track of the average response time of FSStat requests.

- REST: api/cluster/counter/tables/svm_nfs_v3; fsstat.average_latency (Unit: microsec, Type: average, Base: fsstat.total); conf/restperf/9.12.0/nfsv3.yaml
- ZAPI: perf-object-get-instances nfsv3; fsstat_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: fsstat_total); conf/zapiperf/cdot/9.8.0/nfsv3.yaml

shelf_max_fan_speed

Maximum fan speed for shelf in rpm.

- REST: NA; Harvest generated; conf/restperf/9.12.0/disk.yaml
- ZAPI: NA; Harvest generated; conf/zapiperf/cdot/9.8.0/disk.yaml

svm_nfs_fsstat_total

Total number of FSStat procedure requests. It is the total number of FSStat success and FSStat error requests.

- REST: api/cluster/counter/tables/svm_nfs_v3; fsstat.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv3.yaml
- ZAPI: perf-object-get-instances nfsv3; fsstat_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv3.yaml

shelf_max_temperature

Maximum temperature of all non-ambient sensors for shelf in Celsius.

- REST: NA; Harvest generated; conf/restperf/9.12.0/disk.yaml
- ZAPI: NA; Harvest generated; conf/zapiperf/cdot/9.8.0/disk.yaml

svm_nfs_get_dir_delegation_avg_latency

Average latency of GET_DIR_DELEGATION operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; get_dir_delegation.average_latency (Unit: microsec, Type: average, Base: get_dir_delegation.total); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; get_dir_delegation_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: get_dir_delegation_total); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

shelf_min_ambient_temperature

Minimum temperature of all ambient sensors for shelf in Celsius.

- REST: NA; Harvest generated; conf/restperf/9.12.0/disk.yaml
- ZAPI: NA; Harvest generated; conf/zapiperf/cdot/9.8.0/disk.yaml

svm_nfs_get_dir_delegation_total

Total number of GET_DIR_DELEGATION operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; get_dir_delegation.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; get_dir_delegation_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

shelf_min_fan_speed

Minimum fan speed for shelf in rpm.

- REST: NA; Harvest generated; conf/restperf/9.12.0/disk.yaml
- ZAPI: NA; Harvest generated; conf/zapiperf/cdot/9.8.0/disk.yaml

svm_nfs_getattr_avg_latency

Average latency of GetAttr procedure requests. This counter keeps track of the average response time of GetAttr requests.

- REST: api/cluster/counter/tables/svm_nfs_v3, svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; getattr.average_latency (Unit: microsec, Type: average, Base: getattr.total); conf/restperf/9.12.0/nfsv3.yaml, nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv3, nfsv4, nfsv4_1, nfsv4_2; getattr_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: getattr_total); conf/zapiperf/cdot/9.8.0/nfsv3.yaml, nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

shelf_min_temperature

Minimum temperature of all non-ambient sensors for shelf in Celsius.

- REST: NA; Harvest generated; conf/restperf/9.12.0/disk.yaml
- ZAPI: NA; Harvest generated; conf/zapiperf/cdot/9.8.0/disk.yaml

svm_nfs_getattr_total

Total number of Getattr procedure requests. It is the total number of getattr success and getattr error requests.

- REST: api/cluster/counter/tables/svm_nfs_v3, svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; getattr.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv3.yaml, nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv3, nfsv4, nfsv4_1, nfsv4_2; getattr_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv3.yaml, nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
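The REST rows above name the ONTAP counter tables these values come from. The following is a minimal sketch of pulling the getattr counters straight from one of those tables; the table path (api/cluster/counter/tables/svm_nfs_v3) is taken from the Endpoint column, while the "rows" sub-resource, the "fields" query parameter, and the host and credentials are assumptions about a typical ONTAP 9.12+ REST setup rather than anything defined here:

```python
# Minimal sketch; the /rows sub-resource and fields=counters parameter are
# assumptions about the ONTAP REST counter-table API, and the cluster address
# and credentials are hypothetical.
import requests

CLUSTER = "https://cluster.example.com"
TABLE = "api/cluster/counter/tables/svm_nfs_v3"   # endpoint from the rows above

resp = requests.get(f"{CLUSTER}/{TABLE}/rows",
                    params={"fields": "counters"},
                    auth=("admin", "password"), verify=False)
resp.raise_for_status()

WANTED = {"getattr.average_latency", "getattr.total"}
for row in resp.json().get("records", []):
    counters = {c["name"]: c.get("value") for c in row.get("counters", [])}
    print(row.get("id"), {k: v for k, v in counters.items() if k in WANTED})
```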

shelf_power

Power consumed by shelf in Watts.

- REST: NA; Harvest generated; conf/restperf/9.12.0/disk.yaml
- ZAPI: NA; Harvest generated; conf/zapiperf/cdot/9.8.0/disk.yaml

svm_nfs_getdeviceinfo_avg_latency

Average latency of GETDEVICEINFO operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; getdeviceinfo.average_latency (Unit: microsec, Type: average, Base: getdeviceinfo.total); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; getdeviceinfo_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: getdeviceinfo_total); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_close_latency

Average latency for SMB2_COM_CLOSE operations

- REST: Unit: microsec, Type: average, Base: close_ops; conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; close_latency (Unit: microsec, Type: average, Base: close_latency_base); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_getdeviceinfo_total

Total number of GETDEVICEINFO operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; getdeviceinfo.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; getdeviceinfo_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_close_latency_histogram

Latency histogram for SMB2_COM_CLOSE operations

- REST: Unit: none, Type: delta; conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; close_latency_histogram (Unit: none, Type: delta); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_getdevicelist_avg_latency

Average latency of GETDEVICELIST operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; getdevicelist.average_latency (Unit: microsec, Type: average, Base: getdevicelist.total); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; getdevicelist_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: getdevicelist_total); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_close_ops

Number of SMB2_COM_CLOSE operations

- REST: Unit: per_sec, Type: rate; conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; close_ops (Unit: per_sec, Type: rate); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_getdevicelist_total

Total number of GETDEVICELIST operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; getdevicelist.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; getdevicelist_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_create_latency

Average latency for SMB2_COM_CREATE operations

- REST: Unit: microsec, Type: average, Base: create_ops; conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; create_latency (Unit: microsec, Type: average, Base: create_latency_base); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_getfh_avg_latency

Average latency of GETFH procedures

- REST: api/cluster/counter/tables/svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; getfh.average_latency (Unit: microsec, Type: average, Base: getfh.total); conf/restperf/9.12.0/nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4, nfsv4_1, nfsv4_2; getfh_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: getfh_total); conf/zapiperf/cdot/9.8.0/nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_create_latency_histogram

Latency histogram for SMB2_COM_CREATE operations

- REST: Unit: none, Type: delta; conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; create_latency_histogram (Unit: none, Type: delta); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_getfh_total

Total number of GETFH procedures

- REST: api/cluster/counter/tables/svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; getfh.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4, nfsv4_1, nfsv4_2; getfh_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_create_ops

Number of SMB2_COM_CREATE operations

- REST: Unit: per_sec, Type: rate; conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; create_ops (Unit: per_sec, Type: rate); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_latency

Average latency of NFSv3 requests. This counter keeps track of the average response time of NFSv3 requests.

- REST: api/cluster/counter/tables/svm_nfs_v3, svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; latency (Unit: microsec, Type: average, Base: total_ops); conf/restperf/9.12.0/nfsv3.yaml, nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv3, nfsv4, nfsv4_1, nfsv4_2; latency (Unit: microsec, Type: average,no-zero-values, Base: total_ops); conf/zapiperf/cdot/9.8.0/nfsv3.yaml, nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_lock_latency

Average latency for SMB2_COM_LOCK operations

- REST: Unit: microsec, Type: average, Base: lock_ops; conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; lock_latency (Unit: microsec, Type: average, Base: lock_latency_base); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_layoutcommit_avg_latency

Average latency of LAYOUTCOMMIT operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; layoutcommit.average_latency (Unit: microsec, Type: average, Base: layoutcommit.total); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; layoutcommit_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: layoutcommit_total); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_lock_latency_histogram

Latency histogram for SMB2_COM_LOCK operations

- REST: Unit: none, Type: delta; conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; lock_latency_histogram (Unit: none, Type: delta); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_layoutcommit_total

Total number of LAYOUTCOMMIT operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; layoutcommit.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; layoutcommit_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_lock_ops

Number of SMB2_COM_LOCK operations

- REST: api/cluster/counter/tables/smb2; lock_ops (Unit: per_sec, Type: rate); conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; lock_ops (Unit: per_sec, Type: rate); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_layoutget_avg_latency

Average latency of LAYOUTGET operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; layoutget.average_latency (Unit: microsec, Type: average, Base: layoutget.total); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; layoutget_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: layoutget_total); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_negotiate_latency

Average latency for SMB2_COM_NEGOTIATE operations

- REST: api/cluster/counter/tables/smb2; negotiate_latency (Unit: microsec, Type: average, Base: negotiate_ops); conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; negotiate_latency (Unit: microsec, Type: average, Base: negotiate_latency_base); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_layoutget_total

Total number of LAYOUTGET operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; layoutget.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; layoutget_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_negotiate_ops

Number of SMB2_COM_NEGOTIATE operations

- REST: Unit: per_sec, Type: rate; conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; negotiate_ops (Unit: per_sec, Type: rate); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_layoutreturn_avg_latency

Average latency of LAYOUTRETURN operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; layoutreturn.average_latency (Unit: microsec, Type: average, Base: layoutreturn.total); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; layoutreturn_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: layoutreturn_total); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_oplock_break_latency

Average latency for SMB2_COM_OPLOCK_BREAK operations

- REST: Unit: microsec, Type: average, Base: oplock_break_ops; conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; oplock_break_latency (Unit: microsec, Type: average, Base: oplock_break_latency_base); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_layoutreturn_total

Total number of LAYOUTRETURN operations.

- REST: api/cluster/counter/tables/svm_nfs_v41, svm_nfs_v42; layoutreturn.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4_1, nfsv4_2; layoutreturn_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_oplock_break_latency_histogram

Latency histogram for SMB2_COM_OPLOCK_BREAK operations

- REST: api/cluster/counter/tables/smb2; oplock_break_latency_histogram (Unit: none, Type: delta); conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; oplock_break_latency_histogram (Unit: none, Type: delta); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_link_avg_latency

Average latency of Link procedure requests. The counter keeps track of the average response time of Link requests.

- REST: api/cluster/counter/tables/svm_nfs_v3, svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; link.average_latency (Unit: microsec, Type: average, Base: link.total); conf/restperf/9.12.0/nfsv3.yaml, nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv3, nfsv4, nfsv4_1, nfsv4_2; link_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: link_total); conf/zapiperf/cdot/9.8.0/nfsv3.yaml, nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_oplock_break_ops

Number of SMB2_COM_OPLOCK_BREAK operations

- REST: api/cluster/counter/tables/smb2; oplock_break_ops (Unit: per_sec, Type: rate); conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; oplock_break_ops (Unit: per_sec, Type: rate); conf/zapiperf/cdot/9.8.0/smb2.yaml

smb2_query_directory_latency

Average latency for SMB2_COM_QUERY_DIRECTORY operations

- REST: Unit: microsec, Type: average, Base: query_directory_ops; conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; query_directory_latency (Unit: microsec, Type: average, Base: query_directory_latency_base); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_link_total

Total number of Link procedure requests. It is the total number of Link success and Link error requests.

- REST: api/cluster/counter/tables/svm_nfs_v3, svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; link.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv3.yaml, nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv3, nfsv4, nfsv4_1, nfsv4_2; link_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv3.yaml, nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
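The *_latency_histogram counters above are "delta"-type array counters: each poll returns per-bucket counts, and the published value is the change in each bucket since the previous poll. A minimal sketch of that handling, assuming the counter is an array of bucket counts; the bucket labels and values below are hypothetical:

```python
# Minimal sketch (assumption): a "delta"-type histogram counter such as
# smb2_oplock_break_latency_histogram as an array of cumulative per-bucket
# counts; the reported value is the per-interval difference for each bucket.

prev = {"<1ms": 100, "1-10ms": 40, ">10ms": 3}
curr = {"<1ms": 180, "1-10ms": 55, ">10ms": 4}

delta = {bucket: curr[bucket] - prev[bucket] for bucket in curr}
print(delta)  # operations completed per latency bucket during the interval
```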

smb2_query_directory_latency_histogram

Latency histogram for SMB2_COM_QUERY_DIRECTORY operations

- REST: api/cluster/counter/tables/smb2; query_directory_latency_histogram (Unit: none, Type: delta); conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; query_directory_latency_histogram (Unit: none, Type: delta); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_lock_avg_latency

Average latency of LOCK procedures

- REST: api/cluster/counter/tables/svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; lock.average_latency (Unit: microsec, Type: average, Base: lock.total); conf/restperf/9.12.0/nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4, nfsv4_1, nfsv4_2; lock_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: lock_total); conf/zapiperf/cdot/9.8.0/nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_query_directory_ops

Number of SMB2_COM_QUERY_DIRECTORY operations

- REST: api/cluster/counter/tables/smb2; query_directory_ops (Unit: per_sec, Type: rate); conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; query_directory_ops (Unit: per_sec, Type: rate); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_lock_total

Total number of LOCK procedures

- REST: api/cluster/counter/tables/svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; lock.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4, nfsv4_1, nfsv4_2; lock_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_query_info_latency

Average latency for SMB2_COM_QUERY_INFO operations

- REST: api/cluster/counter/tables/smb2; query_info_latency (Unit: microsec, Type: average, Base: query_info_ops); conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; query_info_latency (Unit: microsec, Type: average, Base: query_info_latency_base); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_lockt_avg_latency

Average latency of LOCKT procedures

- REST: api/cluster/counter/tables/svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; lockt.average_latency (Unit: microsec, Type: average, Base: lockt.total); conf/restperf/9.12.0/nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4, nfsv4_1, nfsv4_2; lockt_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: lockt_total); conf/zapiperf/cdot/9.8.0/nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_query_info_latency_histogram

Latency histogram for SMB2_COM_QUERY_INFO operations

- REST: api/cluster/counter/tables/smb2; query_info_latency_histogram (Unit: none, Type: delta); conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; query_info_latency_histogram (Unit: none, Type: delta); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_lockt_total

Total number of LOCKT procedures

- REST: api/cluster/counter/tables/svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; lockt.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4, nfsv4_1, nfsv4_2; lockt_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_query_info_ops

Number of SMB2_COM_QUERY_INFO operations

- REST: Unit: per_sec, Type: rate; conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; query_info_ops (Unit: per_sec, Type: rate); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_locku_avg_latency

Average latency of LOCKU procedures

- REST: api/cluster/counter/tables/svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; locku.average_latency (Unit: microsec, Type: average, Base: locku.total); conf/restperf/9.12.0/nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4, nfsv4_1, nfsv4_2; locku_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: locku_total); conf/zapiperf/cdot/9.8.0/nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_read_latency

Average latency for SMB2_COM_READ operations

- REST: Unit: microsec, Type: average, Base: read_ops; conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; read_latency (Unit: microsec, Type: average, Base: read_ops); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_locku_total

Total number of LOCKU procedures

- REST: api/cluster/counter/tables/svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; locku.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv4, nfsv4_1, nfsv4_2; locku_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_read_ops

Number of SMB2_COM_READ operations

- REST: api/cluster/counter/tables/smb2; read_ops (Unit: per_sec, Type: rate); conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; read_ops (Unit: per_sec, Type: rate); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_lookup_avg_latency

Average latency of LookUp procedure requests. This shows the average time it takes for the LookUp operation to reply to the request.

- REST: api/cluster/counter/tables/svm_nfs_v3, svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; lookup.average_latency (Unit: microsec, Type: average, Base: lookup.total); conf/restperf/9.12.0/nfsv3.yaml, nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv3, nfsv4, nfsv4_1, nfsv4_2; lookup_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: lookup_total); conf/zapiperf/cdot/9.8.0/nfsv3.yaml, nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_session_setup_latency

Average latency for SMB2_COM_SESSION_SETUP operations

- REST: api/cluster/counter/tables/smb2; session_setup_latency (Unit: microsec, Type: average, Base: session_setup_ops); conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; session_setup_latency (Unit: microsec, Type: average, Base: session_setup_latency_base); conf/zapiperf/cdot/9.8.0/smb2.yaml

smb2_session_setup_latency_histogram

Latency histogram for SMB2_COM_SESSION_SETUP operations

- REST: api/cluster/counter/tables/smb2; session_setup_latency_histogram (Unit: none, Type: delta); conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; session_setup_latency_histogram (Unit: none, Type: delta); conf/zapiperf/cdot/9.8.0/smb2.yaml
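The ZAPI rows pair each metric with a perf-object-get-instances object and counter name. The following is a minimal sketch of issuing that call for the lookup counters; the object (nfsv3) and counter names come from the rows above, while the XML envelope, servlet path, and credentials are assumptions about a typical ONTAPI invocation rather than anything specified in this document:

```python
# Minimal sketch; only the object and counter names are taken from the rows
# above. The envelope, version string, URL, and credentials are assumptions.
import requests

ZAPI_URL = "https://cluster.example.com/servlets/netapp.servlets.admin.XMLrequest_filer"
payload = """<?xml version="1.0" encoding="UTF-8"?>
<netapp version="1.180" xmlns="http://www.netapp.com/filer/admin">
  <perf-object-get-instances>
    <objectname>nfsv3</objectname>
    <counters>
      <counter>lookup_avg_latency</counter>
      <counter>lookup_total</counter>
    </counters>
  </perf-object-get-instances>
</netapp>"""

resp = requests.post(ZAPI_URL, data=payload, auth=("admin", "password"), verify=False)
print(resp.text)  # raw XML response; parse with xml.etree.ElementTree as needed
```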

smb2_session_setup_ops

Number of SMB2_COM_SESSION_SETUP operations

- REST: api/cluster/counter/tables/smb2; session_setup_ops (Unit: per_sec, Type: rate); conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; session_setup_ops (Unit: per_sec, Type: rate); conf/zapiperf/cdot/9.8.0/smb2.yaml

svm_nfs_lookup_total

Total number of Lookup procedure requests. It is the total number of lookup success and lookup error requests.

- REST: api/cluster/counter/tables/svm_nfs_v3, svm_nfs_v4, svm_nfs_v41, svm_nfs_v42; lookup.total (Unit: none, Type: rate); conf/restperf/9.12.0/nfsv3.yaml, nfsv4.yaml, nfsv4_1.yaml, nfsv4_2.yaml
- ZAPI: perf-object-get-instances nfsv3, nfsv4, nfsv4_1, nfsv4_2; lookup_total (Unit: none, Type: rate); conf/zapiperf/cdot/9.8.0/nfsv3.yaml, nfsv4.yaml, nfsv4_1.yaml and conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml

smb2_set_info_latency

Average latency for SMB2_COM_SET_INFO operations

- REST: api/cluster/counter/tables/smb2; set_info_latency (Unit: microsec, Type: average, Base: set_info_ops); conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; set_info_latency (Unit: microsec, Type: average, Base: set_info_latency_base); conf/zapiperf/cdot/9.8.0/smb2.yaml

smb2_set_info_latency_histogram

Latency histogram for SMB2_COM_SET_INFO operations

- REST: api/cluster/counter/tables/smb2; set_info_latency_histogram (Unit: none, Type: delta); conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; set_info_latency_histogram (Unit: none, Type: delta); conf/zapiperf/cdot/9.8.0/smb2.yaml

smb2_set_info_ops

Number of SMB2_COM_SET_INFO operations

- REST: Unit: per_sec, Type: rate; conf/restperf/9.14.1/smb2.yaml
- ZAPI: perf-object-get-instances smb2; set_info_ops (Unit: per_sec, Type: rate); conf/zapiperf/cdot/9.8.0/smb2.yaml

    svm_nfs_lookupp_avg_latency

    +

    Average latency of LOOKUPP procedures

    @@ -52995,46 +58677,44 @@

    smb2_set_info_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - - + + + + - -
    conf/restperf/9.14.1/smb2.yamlapi/cluster/counter/tables/svm_nfs_v4lookupp.average_latency
    Unit: microsec
    Type: average
    Base: lookupp.total
    conf/restperf/9.12.0/nfsv4.yaml
    ZAPIperf-object-get-instances smb2set_info_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yamlRESTapi/cluster/counter/tables/svm_nfs_v41lookupp.average_latency
    Unit: microsec
    Type: average
    Base: lookupp.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    -

    smb2_tree_connect_latency

    -

    Average latency for SMB2_COM_TREE_CONNECT operations

    APIEndpointMetricTemplateRESTapi/cluster/counter/tables/svm_nfs_v42lookupp.average_latency
    Unit: microsec
    Type: average
    Base: lookupp.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    RESTapi/cluster/counter/tables/smb2tree_connect_latency
    Unit: microsec
    Type: average
    Base: tree_connect_ops
    conf/restperf/9.14.1/smb2.yamlZAPIperf-object-get-instances nfsv4lookupp_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookupp_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances smb2tree_connect_latency
    Unit: microsec
    Type: average
    Base: tree_connect_latency_base
    conf/zapiperf/cdot/9.8.0/smb2.yamlperf-object-get-instances nfsv4_1lookupp_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookupp_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIperf-object-get-instances nfsv4_2lookupp_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookupp_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
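The ZAPI rows above are flagged average,no-zero-values with Base: lookupp_total. The practical effect is that when the base counter does not advance during a poll, no zero sample is published. A small sketch of that behaviour, assuming the collector simply skips such intervals; names are illustrative.

```python
# Illustrative sketch of "average,no-zero-values": skip the sample when the
# base counter (e.g. lookupp_total) did not advance, instead of emitting 0.
from typing import Optional

def avg_no_zero(latency_delta_us: int, base_delta: int) -> Optional[float]:
    if base_delta == 0:
        return None            # no data point for this poll
    return latency_delta_us / base_delta

print(avg_no_zero(0, 0))       # None -> nothing exported
print(avg_no_zero(9000, 6))    # 1500.0 microseconds per LOOKUPP
```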
    -

    smb2_tree_connect_ops

    -

    Number of SMB2_COM_TREE_CONNECT operations

    +

    svm_nfs_lookupp_total

    +

    Total number of LOOKUPP procedures

    @@ -53047,20 +58727,44 @@

smb2_tree_connect_ops
Unit: per_sec
    Type: rate
    Base: -

    conf/restperf/9.14.1/smb2.yamlapi/cluster/counter/tables/svm_nfs_v4lookupp.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41lookupp.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42lookupp.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances smb2tree_connect_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yamlperf-object-get-instances nfsv4lookupp_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1lookupp_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIperf-object-get-instances nfsv4_2lookupp_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    smb2_write_latency

    -

    Average latency for SMB2_COM_WRITE operations

    +

    svm_nfs_mkdir_avg_latency

    +

    Average latency of MkDir procedure requests. The counter keeps track of the average response time of MkDir requests.

    @@ -53073,20 +58777,20 @@

smb2_write_latency
Unit: microsec
    Type: average
    Base: write_ops -

    conf/restperf/9.14.1/smb2.yamlapi/cluster/counter/tables/svm_nfs_v3mkdir.average_latency
    Unit: microsec
    Type: average
    Base: mkdir.total
    conf/restperf/9.12.0/nfsv3.yaml
    ZAPIperf-object-get-instances smb2write_latency
    Unit: microsec
    Type: average
    Base: write_latency_base
    conf/zapiperf/cdot/9.8.0/smb2.yamlperf-object-get-instances nfsv3mkdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: mkdir_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
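Like the other *_avg_latency counters, mkdir.average_latency is a cumulative latency sum in microseconds whose Base is mkdir.total; the average reported for an interval is the change in the sum divided by the change in the base over that interval. A minimal sketch of the calculation, with hypothetical variable names.

```python
# Illustrative only: derive an average latency (Type: average) from its
# Base counter over one poll interval.
def average_latency_us(lat_sum_prev: int, lat_sum_curr: int,
                       ops_prev: int, ops_curr: int) -> float:
    ops = ops_curr - ops_prev
    if ops <= 0:
        return 0.0                      # no MkDir requests this interval
    return (lat_sum_curr - lat_sum_prev) / ops

# 45,000 us of additional latency across 30 new MkDir requests -> 1,500 us each.
print(average_latency_us(100_000, 145_000, 70, 100))
```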
    -

    smb2_write_ops

    -

    Number of SMB2_COM_WRITE operations

    +

    svm_nfs_mkdir_total

    +

Total number of MkDir procedure requests. It is the total number of MkDir success and MkDir error requests.

    @@ -53099,20 +58803,20 @@

smb2_write_ops
Unit: per_sec
    Type: rate
    Base: -

    conf/restperf/9.14.1/smb2.yamlapi/cluster/counter/tables/svm_nfs_v3mkdir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yaml
    ZAPIperf-object-get-instances smb2write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/smb2.yamlperf-object-get-instances nfsv3mkdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    -

    snapmirror_break_failed_count

    -

    The number of failed SnapMirror break operations for the relationship

    +

    svm_nfs_mknod_avg_latency

    +

    Average latency of MkNod procedure requests. The counter keeps track of the average response time of MkNod requests.

    @@ -53125,20 +58829,20 @@

snapmirror_break_failed_count
Unit: microsec
    Type: average
    Base: mknod.total +

    conf/restperf/9.12.0/nfsv3.yaml
    ZAPIsnapmirror-get-itersnapmirror-info.break-failed-countconf/zapi/cdot/9.8.0/snapmirror.yamlperf-object-get-instances nfsv3mknod_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: mknod_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    -

    snapmirror_break_successful_count

    -

    The number of successful SnapMirror break operations for the relationship

    +

    svm_nfs_mknod_total

    +

Total number of MkNod procedure requests. It is the total number of MkNod success and MkNod error requests.

    @@ -53151,20 +58855,20 @@

    snapmirror_break_successful_count

    RESTapi/private/cli/snapmirrorbreak_successful_countconf/rest/9.12.0/snapmirror.yamlapi/cluster/counter/tables/svm_nfs_v3mknod.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yaml
    ZAPIsnapmirror-get-itersnapmirror-info.break-successful-countconf/zapi/cdot/9.8.0/snapmirror.yamlperf-object-get-instances nfsv3mknod_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    -

    snapmirror_lag_time

    -

    Amount of time since the last snapmirror transfer in seconds

    +

    svm_nfs_null_avg_latency

    +

    Average latency of Null procedure requests.

    @@ -53177,72 +58881,56 @@

snapmirror_lag_time
Unit: microsec
    Type: average
    Base: null.total +

    conf/restperf/9.12.0/nfsv3.yaml
    ZAPIsnapmirror-get-itersnapmirror-info.lag-timeconf/zapi/cdot/9.8.0/snapmirror.yamlRESTapi/cluster/counter/tables/svm_nfs_v4null.average_latency
    Unit: microsec
    Type: average
    Base: null.total
    conf/restperf/9.12.0/nfsv4.yaml
    -

    snapmirror_last_transfer_duration

    -

    Duration of the last SnapMirror transfer in seconds

API Endpoint Metric Template
REST
api/cluster/counter/tables/svm_nfs_v41
null.average_latency
    Unit: microsec
    Type: average
    Base: null.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/private/cli/snapmirrorlast_transfer_durationconf/rest/9.12.0/snapmirror.yamlapi/cluster/counter/tables/svm_nfs_v42null.average_latency
    Unit: microsec
    Type: average
    Base: null.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIsnapmirror-get-itersnapmirror-info.last-transfer-durationconf/zapi/cdot/9.8.0/snapmirror.yamlperf-object-get-instances nfsv3null_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    -

    snapmirror_last_transfer_end_timestamp

    -

    The Timestamp of the end of the last transfer

API Endpoint Metric Template
ZAPI
perf-object-get-instances nfsv4
null_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    RESTapi/private/cli/snapmirrorlast_transfer_end_timestampconf/rest/9.12.0/snapmirror.yamlZAPIperf-object-get-instances nfsv4_1null_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIsnapmirror-get-itersnapmirror-info.last-transfer-end-timestampconf/zapi/cdot/9.8.0/snapmirror.yamlperf-object-get-instances nfsv4_2null_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    snapmirror_last_transfer_size

    -

    Size in kilobytes (1024 bytes) of the last transfer

    +

    svm_nfs_null_total

    +

Total number of Null procedure requests. It is the total number of null success and null error requests.

    @@ -53255,72 +58943,56 @@

snapmirror_last_transfer_size
Unit: none
    Type: rate
    Base: +

    conf/restperf/9.12.0/nfsv3.yaml
    ZAPIsnapmirror-get-itersnapmirror-info.last-transfer-sizeconf/zapi/cdot/9.8.0/snapmirror.yamlRESTapi/cluster/counter/tables/svm_nfs_v4null.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    -

    snapmirror_newest_snapshot_timestamp

    -

    The timestamp of the newest Snapshot copy on the destination volume

API Endpoint Metric Template
REST
api/cluster/counter/tables/svm_nfs_v41
null.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/private/cli/snapmirrornewest_snapshot_timestampconf/rest/9.12.0/snapmirror.yamlapi/cluster/counter/tables/svm_nfs_v42null.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIsnapmirror-get-itersnapmirror-info.newest-snapshot-timestampconf/zapi/cdot/9.8.0/snapmirror.yamlperf-object-get-instances nfsv3null_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    -

    snapmirror_resync_failed_count

    -

    The number of failed SnapMirror resync operations for the relationship

API Endpoint Metric Template
ZAPI
perf-object-get-instances nfsv4
null_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    RESTapi/private/cli/snapmirrorresync_failed_countconf/rest/9.12.0/snapmirror.yamlZAPIperf-object-get-instances nfsv4_1null_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIsnapmirror-get-itersnapmirror-info.resync-failed-countconf/zapi/cdot/9.8.0/snapmirror.yamlperf-object-get-instances nfsv4_2null_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    snapmirror_resync_successful_count

    -

    The number of successful SnapMirror resync operations for the relationship

    +

    svm_nfs_nverify_avg_latency

    +

    Average latency of NVERIFY procedures

    @@ -53333,20 +59005,44 @@

    snapmirror_resync_successful_count

    RESTapi/private/cli/snapmirrorresync_successful_countconf/rest/9.12.0/snapmirror.yamlapi/cluster/counter/tables/svm_nfs_v4nverify.average_latency
    Unit: microsec
    Type: average
    Base: nverify.total
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41nverify.average_latency
    Unit: microsec
    Type: average
    Base: nverify.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42nverify.average_latency
    Unit: microsec
    Type: average
    Base: nverify.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIsnapmirror-get-itersnapmirror-info.resync-successful-countconf/zapi/cdot/9.8.0/snapmirror.yamlperf-object-get-instances nfsv4nverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: nverify_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1nverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: nverify_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIperf-object-get-instances nfsv4_2nverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: nverify_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    snapmirror_total_transfer_bytes

    -

    Cumulative bytes transferred for the relationship

    +

    svm_nfs_nverify_total

    +

    Total number of NVERIFY procedures

    @@ -53359,20 +59055,44 @@

    snapmirror_total_transfer_bytes

    RESTapi/private/cli/snapmirrortotal_transfer_bytesconf/rest/9.12.0/snapmirror.yamlapi/cluster/counter/tables/svm_nfs_v4nverify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41nverify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42nverify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIsnapmirror-get-itersnapmirror-info.total-transfer-bytesconf/zapi/cdot/9.8.0/snapmirror.yamlperf-object-get-instances nfsv4nverify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1nverify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIperf-object-get-instances nfsv4_2nverify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    snapmirror_total_transfer_time_secs

    -

    Cumulative total transfer time in seconds for the relationship

    +

    svm_nfs_open_avg_latency

    +

    Average latency of OPEN procedures

    @@ -53385,20 +59105,44 @@

    snapmirror_total_transfer_time_secs

    RESTapi/private/cli/snapmirrortotal_transfer_time_secsconf/rest/9.12.0/snapmirror.yamlapi/cluster/counter/tables/svm_nfs_v4open.average_latency
    Unit: microsec
    Type: average
    Base: open.total
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41open.average_latency
    Unit: microsec
    Type: average
    Base: open.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42open.average_latency
    Unit: microsec
    Type: average
    Base: open.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIsnapmirror-get-itersnapmirror-info.total-transfer-time-secsconf/zapi/cdot/9.8.0/snapmirror.yamlperf-object-get-instances nfsv4open_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1open_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIperf-object-get-instances nfsv4_2open_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    snapmirror_update_failed_count

    -

The number of failed SnapMirror update operations for the relationship

    +

    svm_nfs_open_confirm_avg_latency

    +

    Average latency of OPEN_CONFIRM procedures

    @@ -53411,20 +59155,20 @@

snapmirror_update_failed_count
Unit: microsec
    Type: average
    Base: open_confirm.total +

    conf/restperf/9.12.0/nfsv4.yaml
    ZAPIsnapmirror-get-itersnapmirror-info.update-failed-countconf/zapi/cdot/9.8.0/snapmirror.yamlperf-object-get-instances nfsv4open_confirm_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_confirm_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    -

    snapmirror_update_successful_count

    -

    Number of Successful Updates

    +

    svm_nfs_open_confirm_total

    +

    Total number of OPEN_CONFIRM procedures

    @@ -53437,20 +59181,20 @@

    snapmirror_update_successful_count

    RESTapi/private/cli/snapmirrorupdate_successful_countconf/rest/9.12.0/snapmirror.yamlapi/cluster/counter/tables/svm_nfs_v4open_confirm.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    ZAPIsnapmirror-get-itersnapmirror-info.update-successful-countconf/zapi/cdot/9.8.0/snapmirror.yamlperf-object-get-instances nfsv4open_confirm_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    -

    snapshot_policy_total_schedules

    -

    Total Number of Schedules in this Policy

    +

    svm_nfs_open_downgrade_avg_latency

    +

    Average latency of OPEN_DOWNGRADE procedures

    @@ -53463,20 +59207,44 @@

    snapshot_policy_total_schedules

    RESTapi/private/cli/snapshot/policytotal_schedulesconf/rest/9.12.0/snapshotpolicy.yamlapi/cluster/counter/tables/svm_nfs_v4open_downgrade.average_latency
    Unit: microsec
    Type: average
    Base: open_downgrade.total
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41open_downgrade.average_latency
    Unit: microsec
    Type: average
    Base: open_downgrade.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42open_downgrade.average_latency
    Unit: microsec
    Type: average
    Base: open_downgrade.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIsnapshot-policy-get-itersnapshot-policy-info.total-schedulesconf/zapi/cdot/9.8.0/snapshotpolicy.yamlperf-object-get-instances nfsv4open_downgrade_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_downgrade_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1open_downgrade_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_downgrade_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIperf-object-get-instances nfsv4_2open_downgrade_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_downgrade_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_cifs_connections

    -

    Number of connections

    +

    svm_nfs_open_downgrade_total

    +

    Total number of OPEN_DOWNGRADE procedures

    @@ -53489,20 +59257,44 @@

svm_cifs_connections
Unit: none
    Type: raw
    Base: -

    conf/restperf/9.12.0/cifs_vserver.yamlapi/cluster/counter/tables/svm_nfs_v4open_downgrade.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41open_downgrade.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42open_downgrade.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances cifs:vserverconnections
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_vserver.yamlperf-object-get-instances nfsv4open_downgrade_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1open_downgrade_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIperf-object-get-instances nfsv4_2open_downgrade_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_cifs_established_sessions

    -

    Number of established SMB and SMB2 sessions

    +

    svm_nfs_open_total

    +

    Total number of OPEN procedures

    @@ -53515,20 +59307,44 @@

svm_cifs_established_sessions
Unit: none
    Type: raw
    Base: -

    conf/restperf/9.12.0/cifs_vserver.yamlapi/cluster/counter/tables/svm_nfs_v4open.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41open.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42open.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances cifs:vserverestablished_sessions
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_vserver.yamlperf-object-get-instances nfsv4open_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1open_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIperf-object-get-instances nfsv4_2open_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_cifs_latency

    -

    Average latency for CIFS operations

    +

    svm_nfs_openattr_avg_latency

    +

    Average latency of OPENATTR procedures

    @@ -53541,20 +59357,44 @@

svm_cifs_latency
Unit: microsec
    Type: average
    Base: latency_base -

    conf/restperf/9.12.0/cifs_vserver.yamlapi/cluster/counter/tables/svm_nfs_v4openattr.average_latency
    Unit: microsec
    Type: average
    Base: openattr.total
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41openattr.average_latency
    Unit: microsec
    Type: average
    Base: openattr.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42openattr.average_latency
    Unit: microsec
    Type: average
    Base: openattr.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances cifs:vservercifs_latency
    Unit: microsec
    Type: average
    Base: cifs_latency_base
    conf/zapiperf/cdot/9.8.0/cifs_vserver.yamlperf-object-get-instances nfsv4openattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: openattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1openattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: openattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIperf-object-get-instances nfsv4_2openattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: openattr_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_cifs_op_count

    -

    Array of select CIFS operation counts

    +

    svm_nfs_openattr_total

    +

    Total number of OPENATTR procedures

    @@ -53567,20 +59407,44 @@

svm_cifs_op_count
Unit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/cifs_vserver.yamlapi/cluster/counter/tables/svm_nfs_v4openattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41openattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42openattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances cifs:vservercifs_op_count
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_vserver.yamlperf-object-get-instances nfsv4openattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1openattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIperf-object-get-instances nfsv4_2openattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_cifs_open_files

    -

    Number of open files over SMB and SMB2

    +

    svm_nfs_ops

    +

    Total number of NFSv3 procedure requests per second.

    @@ -53593,72 +59457,56 @@

svm_cifs_open_files
Unit: none
    Type: raw
    Base: -

    conf/restperf/9.12.0/cifs_vserver.yamlapi/cluster/counter/tables/svm_nfs_v3ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yaml
    ZAPIperf-object-get-instances cifs:vserveropen_files
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_vserver.yamlRESTapi/cluster/counter/tables/svm_nfs_v4total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    -

    svm_cifs_ops

    -

    Total number of CIFS operations

API Endpoint Metric Template
REST
api/cluster/counter/tables/svm_nfs_v41
total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_cifsapi/cluster/counter/tables/svm_nfs_v42 total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/cifs_vserver.yamlconf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances cifs:vservercifs_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_vserver.yamlperf-object-get-instances nfsv3nfsv3_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    -

    svm_cifs_read_latency

    -

    Average latency for CIFS read operations

API Endpoint Metric Template
ZAPI
perf-object-get-instances nfsv4
total_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_cifsaverage_read_latency
    Unit: microsec
    Type: average
    Base: total_read_ops
    conf/restperf/9.12.0/cifs_vserver.yamlZAPIperf-object-get-instances nfsv4_1total_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIperf-object-get-instances cifs:vservercifs_read_latency
    Unit: microsec
    Type: average
    Base: cifs_read_ops
    conf/zapiperf/cdot/9.8.0/cifs_vserver.yamlperf-object-get-instances nfsv4_2total_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
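svm_nfs_ops is exported per NFS version (the v3 ops counter plus total_ops for v4, v4.1 and v4.2), so a single figure for all NFS traffic has to be summed on the consumer side. A toy sketch with made-up sample values:

```python
# Hypothetical per-version samples (ops/s); labels and values are made up.
per_version_ops = {"nfsv3": 1250.0, "nfsv4": 310.0, "nfsv4_1": 95.0, "nfsv4_2": 12.5}

total_nfs_ops = sum(per_version_ops.values())
print(f"total NFS ops/s across versions: {total_nfs_ops:.1f}")  # 1667.5
```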
    -

    svm_cifs_read_ops

    -

    Total number of CIFS read operations

    +

    svm_nfs_pathconf_avg_latency

    +

    Average latency of PathConf procedure requests. The counter keeps track of the average response time of PathConf requests.

    @@ -53671,20 +59519,20 @@

svm_cifs_read_ops
Unit: per_sec
    Type: rate
    Base: -

    conf/restperf/9.12.0/cifs_vserver.yamlapi/cluster/counter/tables/svm_nfs_v3pathconf.average_latency
    Unit: microsec
    Type: average
    Base: pathconf.total
    conf/restperf/9.12.0/nfsv3.yaml
    ZAPIperf-object-get-instances cifs:vservercifs_read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_vserver.yamlperf-object-get-instances nfsv3pathconf_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: pathconf_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    -

    svm_cifs_signed_sessions

    -

    Number of signed SMB and SMB2 sessions.

    +

    svm_nfs_pathconf_total

    +

Total number of PathConf procedure requests. It is the total number of PathConf success and PathConf error requests.

    @@ -53697,20 +59545,20 @@

    svm_cifs_signed_sessions

    RESTapi/cluster/counter/tables/svm_cifssigned_sessions
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.12.0/cifs_vserver.yamlapi/cluster/counter/tables/svm_nfs_v3pathconf.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yaml
    ZAPIperf-object-get-instances cifs:vserversigned_sessions
    Unit: none
    Type: raw
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_vserver.yamlperf-object-get-instances nfsv3pathconf_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    -

    svm_cifs_write_latency

    -

    Average latency for CIFS write operations

    +

    svm_nfs_putfh_avg_latency

    +

    Average latency of PUTFH procedures

    @@ -53723,20 +59571,44 @@

    svm_cifs_write_latency

    RESTapi/cluster/counter/tables/svm_cifsaverage_write_latency
    Unit: microsec
    Type: average
    Base: total_write_ops
    conf/restperf/9.12.0/cifs_vserver.yamlapi/cluster/counter/tables/svm_nfs_v4putfh.average_latency
    Unit: microsec
    Type: average
    Base: putfh.total
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41putfh.average_latency
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42putfh.average_latency
    Unit: microsec
    Type: average
    Base: putfh.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances cifs:vservercifs_write_latency
    Unit: microsec
    Type: average
    Base: cifs_write_ops
    conf/zapiperf/cdot/9.8.0/cifs_vserver.yamlperf-object-get-instances nfsv4putfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1putfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIperf-object-get-instances nfsv4_2putfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putfh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_cifs_write_ops

    -

    Total number of CIFS write operations

    +

    svm_nfs_putfh_total

    +

    Total number of PUTFH procedures

    @@ -53749,20 +59621,44 @@

svm_cifs_write_ops
Unit: per_sec
    Type: rate
    Base: -

    conf/restperf/9.12.0/cifs_vserver.yamlapi/cluster/counter/tables/svm_nfs_v4putfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41putfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42putfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances cifs:vservercifs_write_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/cifs_vserver.yamlperf-object-get-instances nfsv4putfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1putfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIperf-object-get-instances nfsv4_2putfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_access_avg_latency

    -

    Average latency of Access procedure requests. The counter keeps track of the average response time of Access requests.

    +

    svm_nfs_putpubfh_avg_latency

    +

    Average latency of PUTPUBFH procedures

    @@ -53775,56 +59671,44 @@

svm_nfs_access_avg_latency
Unit: microsec
    Type: average
    Base: access.total -

    conf/restperf/9.12.0/nfsv3.yaml
    REST api/cluster/counter/tables/svm_nfs_v4access.average_latency
    Unit: microsec
    Type: average
    Base: access.total
    putpubfh.average_latency
    Unit: microsec
    Type: average
    Base: putpubfh.total
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41access.average_latency
    Unit: microsec
    Type: average
    Base: access.total
    putpubfh.average_latency
    Unit: microsec
    Type: average
    Base: putpubfh.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42access.average_latency
    Unit: microsec
    Type: average
    Base: access.total
    putpubfh.average_latency
    Unit: microsec
    Type: average
    Base: putpubfh.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3access_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: access_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv4access_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: access_total
    putpubfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putpubfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1access_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: access_total
    putpubfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putpubfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2access_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: access_total
    putpubfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putpubfh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_access_total

    -

    Total number of Access procedure requests. It is the total number of access success and access error requests.

    +

    svm_nfs_putpubfh_total

    +

    Total number of PUTPUBFH procedures

    @@ -53837,56 +59721,44 @@

svm_nfs_access_total
Unit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv3.yaml
    REST api/cluster/counter/tables/svm_nfs_v4access.total
    Unit: none
    Type: rate
    Base:
    putpubfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41access.total
    Unit: none
    Type: rate
    Base:
    putpubfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42access.total
    Unit: none
    Type: rate
    Base:
    putpubfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3access_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv4access_total
    Unit: none
    Type: rate
    Base:
    putpubfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1access_total
    Unit: none
    Type: rate
    Base:
    putpubfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2access_total
    Unit: none
    Type: rate
    Base:
    putpubfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_backchannel_ctl_avg_latency

    -

    Average latency of BACKCHANNEL_CTL operations.

    +

    svm_nfs_putrootfh_avg_latency

    +

    Average latency of PUTROOTFH procedures

    @@ -53899,32 +59771,44 @@

    svm_nfs_backchannel_ctl_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v4putrootfh.average_latency
    Unit: microsec
    Type: average
    Base: putrootfh.total
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41backchannel_ctl.average_latency
    Unit: microsec
    Type: average
    Base: backchannel_ctl.total
    putrootfh.average_latency
    Unit: microsec
    Type: average
    Base: putrootfh.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42backchannel_ctl.average_latency
    Unit: microsec
    Type: average
    Base: backchannel_ctl.total
    putrootfh.average_latency
    Unit: microsec
    Type: average
    Base: putrootfh.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4putrootfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putrootfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1backchannel_ctl_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: backchannel_ctl_total
    putrootfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putrootfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2backchannel_ctl_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: backchannel_ctl_total
    putrootfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putrootfh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_backchannel_ctl_total

    -

    Total number of BACKCHANNEL_CTL operations.

    +

    svm_nfs_putrootfh_total

    +

    Total number of PUTROOTFH procedures

    @@ -53937,32 +59821,44 @@

svm_nfs_backchannel_ctl_total
Unit: none
    Type: rate
    Base: +

    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41backchannel_ctl.total
    Unit: none
    Type: rate
    Base:
    putrootfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42backchannel_ctl.total
    Unit: none
    Type: rate
    Base:
    putrootfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4putrootfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1backchannel_ctl_total
    Unit: none
    Type: rate
    Base:
    putrootfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2backchannel_ctl_total
    Unit: none
    Type: rate
    Base:
    putrootfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_bind_conn_to_session_avg_latency

    -

    Average latency of BIND_CONN_TO_SESSION operations.

    +

    svm_nfs_read_avg_latency

    +

    Average latency of Read procedure requests. The counter keeps track of the average response time of Read requests.

    @@ -53975,32 +59871,56 @@

svm_nfs_bind_conn_to_session_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v3read.average_latency
    Unit: microsec
    Type: average
    Base: read.total
    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4read.average_latency
    Unit: microsec
    Type: average
    Base: read.total
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41bind_connections_to_session.average_latency
    Unit: microsec
    Type: average
    Base: bind_connections_to_session.total
    read.average_latency
    Unit: microsec
    Type: average
    Base: read.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42bind_conn_to_session.average_latency
    Unit: microsec
    Type: average
    Base: bind_conn_to_session.total
    read.average_latency
    Unit: microsec
    Type: average
    Base: read.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3read_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4read_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1bind_conn_to_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: bind_conn_to_session_total
    read_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2bind_conn_to_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: bind_conn_to_session_total
    read_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_bind_conn_to_session_total

    -

    Total number of BIND_CONN_TO_SESSION operations.

    +

    svm_nfs_read_ops

    +

    Total observed NFSv3 read operations per second.

    @@ -54013,32 +59933,20 @@

    svm_nfs_bind_conn_to_session_total

    RESTapi/cluster/counter/tables/svm_nfs_v41bind_connections_to_session.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42bind_conn_to_session.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1bind_conn_to_session_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/svm_nfs_v3read_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4_2bind_conn_to_session_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances nfsv3nfsv3_read_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    -

    svm_nfs_close_avg_latency

    -

    Average latency of CLOSE procedures

+

svm_nfs_read_symlink_avg_latency

+

    Average latency of ReadSymLink procedure requests. The counter keeps track of the average response time of ReadSymLink requests.

    @@ -54051,44 +59959,46 @@

svm_nfs_close_avg_latency
Unit: microsec
    Type: average
    Base: close.total -

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41close.average_latency
    Unit: microsec
    Type: average
    Base: close.total
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/svm_nfs_v3read_symlink.average_latency
    Unit: microsec
    Type: average
    Base: read_symlink.total
    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42close.average_latency
    Unit: microsec
    Type: average
    Base: close.total
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances nfsv3read_symlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_symlink_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
+

svm_nfs_read_symlink_total

+

    Total number of ReadSymLink procedure requests. It is the total number of read symlink success and read symlink error requests.

    ZAPIperf-object-get-instances nfsv4close_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: close_total
conf/zapiperf/cdot/9.8.0/nfsv4.yaml
API Endpoint Metric Template
    ZAPIperf-object-get-instances nfsv4_1close_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: close_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/svm_nfs_v3read_symlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4_2close_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: close_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances nfsv3read_symlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    -

    svm_nfs_close_total

    -

    Total number of CLOSE procedures

    +

    svm_nfs_read_throughput

    +

    Rate of NFSv3 read data transfers per second.

    @@ -54101,44 +60011,56 @@

svm_nfs_close_total
Unit: b_per_sec
    Type: rate
    Base: +

    conf/restperf/9.12.0/nfsv3.yaml
    REST api/cluster/counter/tables/svm_nfs_v4close.total
    Unit: none
    Type: rate
    Base:
    total.read_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41close.total
    Unit: none
    Type: rate
    Base:
    total.read_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42close.total
    Unit: none
    Type: rate
    Base:
    total.read_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3nfsv3_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv4close_total
    Unit: none
    Type: rate
    Base:
    nfs4_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1close_total
    Unit: none
    Type: rate
    Base:
    nfs41_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2close_total
    Unit: none
    Type: rate
    Base:
    nfs42_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
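The read_throughput counters above are rates with Unit: b_per_sec, i.e. bytes per second derived from a byte total. A short sketch of computing and displaying such a value from two samples; the helper name and figures are illustrative only.

```python
# Illustrative only: bytes-per-second from two samples of a byte counter.
def throughput_bps(bytes_prev: int, bytes_curr: int, interval_s: float) -> float:
    return max(bytes_curr - bytes_prev, 0) / interval_s

bps = throughput_bps(10 * 2**20, 70 * 2**20, 60.0)   # 60 MiB read in 60 s
print(f"{bps:.0f} B/s = {bps / 2**20:.1f} MiB/s")     # ~1.0 MiB/s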
    -

    svm_nfs_commit_avg_latency

    -

    Average latency of Commit procedure requests. The counter keeps track of the average response time of Commit requests.

    +

    svm_nfs_read_total

    +

Total number of Read procedure requests. It is the total number of read success and read error requests.

    @@ -54152,55 +60074,55 @@

svm_nfs_commit_avg_latency
Unit: microsec
    Type: average
    Base: commit.total +

    read.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yaml
    REST api/cluster/counter/tables/svm_nfs_v4commit.average_latency
    Unit: microsec
    Type: average
    Base: commit.total
    read.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41commit.average_latency
    Unit: microsec
    Type: average
    Base: commit.total
    read.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42commit.average_latency
    Unit: microsec
    Type: average
    Base: commit.total
    read.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPI perf-object-get-instances nfsv3commit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: commit_total
    read_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv4commit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: commit_total
    read_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1commit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: commit_total
    read_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2commit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: commit_total
    read_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_commit_total

    -

    Total number of Commit procedure requests. It is the total number of Commit success and Commit error requests.

    +

    svm_nfs_readdir_avg_latency

    +

    Average latency of ReadDir procedure requests. The counter keeps track of the average response time of ReadDir requests.

    @@ -54214,55 +60136,55 @@

svm_nfs_commit_total
Unit: none
    Type: rate
    Base: +

    readdir.average_latency
    Unit: microsec
    Type: average
    Base: readdir.total
    conf/restperf/9.12.0/nfsv3.yaml
    REST api/cluster/counter/tables/svm_nfs_v4commit.total
    Unit: none
    Type: rate
    Base:
    readdir.average_latency
    Unit: microsec
    Type: average
    Base: readdir.total
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41commit.total
    Unit: none
    Type: rate
    Base:
    readdir.average_latency
    Unit: microsec
    Type: average
    Base: readdir.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42commit.total
    Unit: none
    Type: rate
    Base:
    readdir.average_latency
    Unit: microsec
    Type: average
    Base: readdir.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPI perf-object-get-instances nfsv3commit_total
    Unit: none
    Type: rate
    Base:
    readdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv4commit_total
    Unit: none
    Type: rate
    Base:
    readdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1commit_total
    Unit: none
    Type: rate
    Base:
    readdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2commit_total
    Unit: none
    Type: rate
    Base:
    readdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_create_avg_latency

    -

    Average latency of Create procedure requests. The counter keeps track of the average response time of Create requests.

    +

    svm_nfs_readdir_total

    +

Total number of ReadDir procedure requests. It is the total number of ReadDir success and ReadDir error requests.

    @@ -54276,55 +60198,55 @@

svm_nfs_create_avg_latency
Unit: microsec
    Type: average
    Base: create.total +

    readdir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yaml
    REST api/cluster/counter/tables/svm_nfs_v4create.average_latency
    Unit: microsec
    Type: average
    Base: create.total
    readdir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41create.average_latency
    Unit: microsec
    Type: average
    Base: create.total
    readdir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42create.average_latency
    Unit: microsec
    Type: average
    Base: create.total
    readdir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPI perf-object-get-instances nfsv3create_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_total
    readdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv4create_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_total
    readdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1create_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_total
    readdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2create_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_total
    readdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_create_session_avg_latency

    -

    Average latency of CREATE_SESSION operations.

    +

    svm_nfs_readdirplus_avg_latency

    +

    Average latency of ReadDirPlus procedure requests. The counter keeps track of the average response time of ReadDirPlus requests.

    @@ -54337,32 +60259,20 @@

    svm_nfs_create_session_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v41create_session.average_latency
    Unit: microsec
    Type: average
    Base: create_session.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42create_session.average_latency
    Unit: microsec
    Type: average
    Base: create_session.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1create_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_session_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/svm_nfs_v3readdirplus.average_latency
    Unit: microsec
    Type: average
    Base: readdirplus.total
    conf/restperf/9.12.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4_2create_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_session_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances nfsv3readdirplus_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdirplus_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    -

    svm_nfs_create_session_total

    -

    Total number of CREATE_SESSION operations.

    +

    svm_nfs_readdirplus_total

    +

Total number of ReadDirPlus procedure requests. It is the total number of ReadDirPlus success and ReadDirPlus error requests.

    @@ -54375,32 +60285,20 @@

svm_nfs_create_session_total
Unit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42create_session.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1create_session_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/svm_nfs_v3readdirplus.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4_2create_session_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances nfsv3readdirplus_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    -

    svm_nfs_create_total

    -

Total number of Create procedure requests. It is the total number of create success and create error requests.

+

svm_nfs_readlink_avg_latency

+

    Average latency of READLINK procedures

    @@ -54413,56 +60311,44 @@

svm_nfs_create_total
Unit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv3.yaml
    REST api/cluster/counter/tables/svm_nfs_v4create.total
    Unit: none
    Type: rate
    Base:
    readlink.average_latency
    Unit: microsec
    Type: average
    Base: readlink.total
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41create.total
    Unit: none
    Type: rate
    Base:
    readlink.average_latency
    Unit: microsec
    Type: average
    Base: readlink.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42create.total
    Unit: none
    Type: rate
    Base:
    readlink.average_latency
    Unit: microsec
    Type: average
    Base: readlink.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3create_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv4create_total
    Unit: none
    Type: rate
    Base:
    readlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readlink_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1create_total
    Unit: none
    Type: rate
    Base:
    readlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readlink_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2create_total
    Unit: none
    Type: rate
    Base:
    readlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readlink_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_delegpurge_avg_latency

    -

    Average latency of DELEGPURGE procedures

+

svm_nfs_readlink_total

+

    Total number of READLINK procedures

    @@ -54476,43 +60362,43 @@

svm_nfs_delegpurge_avg_latency
Unit: microsec
    Type: average
    Base: delegpurge.total +

    readlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41delegpurge.average_latency
    Unit: microsec
    Type: average
    Base: delegpurge.total
    readlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42delegpurge.average_latency
    Unit: microsec
    Type: average
    Base: delegpurge.total
    readlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPI perf-object-get-instances nfsv4delegpurge_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegpurge_total
    readlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1delegpurge_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegpurge_total
    readlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2delegpurge_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegpurge_total
    readlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_delegpurge_total

    -

    Total number of DELEGPURGE procedures

    +

    svm_nfs_reclaim_complete_avg_latency

    +

    Average latency of RECLAIM_COMPLETE operations.

    @@ -54525,44 +60411,32 @@

    svm_nfs_delegpurge_total

    - - - - - - - + - + - - - - - - - + - +
    RESTapi/cluster/counter/tables/svm_nfs_v4delegpurge.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41delegpurge.total
    Unit: none
    Type: rate
    Base:
    reclaim_complete.average_latency
    Unit: microsec
    Type: average
    Base: reclaim_complete.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42delegpurge.total
    Unit: none
    Type: rate
    Base:
    reclaim_complete.average_latency
    Unit: microsec
    Type: average
    Base: reclaim_complete.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4delegpurge_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1delegpurge_total
    Unit: none
    Type: rate
    Base:
    reclaim_complete_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: reclaim_complete_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2delegpurge_total
    Unit: none
    Type: rate
    Base:
    reclaim_complete_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: reclaim_complete_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_delegreturn_avg_latency

    -

    Average latency of DELEGRETURN procedures

    +

    svm_nfs_reclaim_complete_total

    +

    Total number of RECLAIM_COMPLETE operations.

    @@ -54575,44 +60449,32 @@

    svm_nfs_delegreturn_avg_latency

    - - - - - - - + - + - - - - - - - + - +
    RESTapi/cluster/counter/tables/svm_nfs_v4delegreturn.average_latency
    Unit: microsec
    Type: average
    Base: delegreturn.total
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41delegreturn.average_latency
    Unit: microsec
    Type: average
    Base: delegreturn.total
    reclaim_complete.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42delegreturn.average_latency
    Unit: microsec
    Type: average
    Base: delegreturn.total
    reclaim_complete.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4delegreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegreturn_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1delegreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegreturn_total
    reclaim_complete_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2delegreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delegreturn_total
    reclaim_complete_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_delegreturn_total

    -

    Total number of DELEGRETURN procedures

    +

    svm_nfs_release_lock_owner_avg_latency

    +

Average latency of RELEASE_LOCKOWNER procedures

    @@ -54626,81 +60488,107 @@

    svm_nfs_delegreturn_totalUnit: none
    Type: rate
    Base: +

    - - - - - - - - - - - - - + + +
    release_lock_owner.average_latency
    Unit: microsec
    Type: average
    Base: release_lock_owner.total
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41delegreturn.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42delegreturn.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPI perf-object-get-instances nfsv4delegreturn_total
    Unit: none
    Type: rate
    Base:
    release_lock_owner_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: release_lock_owner_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    +

    svm_nfs_release_lock_owner_total

    +

    Total number of RELEASE_LOCKOWNER procedures

    + + - - - - + + + + + + + + + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1delegreturn_total
    Unit: none
    Type: rate
    Base:
conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
API Endpoint Metric Template
    RESTapi/cluster/counter/tables/svm_nfs_v4release_lock_owner.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_2delegreturn_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances nfsv4release_lock_owner_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    -

    svm_nfs_destroy_clientid_avg_latency

    -

    Average latency of DESTROY_CLIENTID operations.

    +

    svm_nfs_remove_avg_latency

    +

    Average latency of Remove procedure requests. The counter keeps track of the average response time of Remove requests.

    - - - - + + + + + + + + + + + + + + + + + + - - - + - + + + + + + + + + + + + + - + - +
API Endpoint Metric Template
    RESTapi/cluster/counter/tables/svm_nfs_v3remove.average_latency
    Unit: microsec
    Type: average
    Base: remove.total
    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4remove.average_latency
    Unit: microsec
    Type: average
    Base: remove.total
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41destroy_clientid.average_latency
    Unit: microsec
    Type: average
    Base: destroy_clientid.total
    remove.average_latency
    Unit: microsec
    Type: average
    Base: remove.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42destroy_clientid.average_latency
    Unit: microsec
    Type: average
    Base: destroy_clientid.total
    remove.average_latency
    Unit: microsec
    Type: average
    Base: remove.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3remove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4remove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1destroy_clientid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: destroy_clientid_total
    remove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2destroy_clientid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: destroy_clientid_total
    remove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
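For counters typed average (for example remove.average_latency above, with Base: remove.total), the published value is derived from two successive polls: the change in the latency accumulator divided by the change in its base counter. The sketch below only illustrates that arithmetic; the sample values are invented.

```python
# Sketch: how an "average"-type counter such as remove.average_latency is
# derived from its base counter (remove.total) across two polls.
# The sample numbers below are illustrative, not real ONTAP output.

def average_counter(prev: dict, curr: dict, counter: str, base: str) -> float:
    """Return delta(counter) / delta(base); 0.0 when no new operations ran."""
    delta_counter = curr[counter] - prev[counter]
    delta_base = curr[base] - prev[base]
    return delta_counter / delta_base if delta_base > 0 else 0.0

# Two hypothetical polls of the svm_nfs_v3 counter row for one SVM.
poll_1 = {"remove.average_latency": 9_000_000, "remove.total": 4_000}
poll_2 = {"remove.average_latency": 9_450_000, "remove.total": 4_300}

latency_us = average_counter(poll_1, poll_2, "remove.average_latency", "remove.total")
print(f"svm_nfs_remove_avg_latency = {latency_us:.1f} microsec")  # 450000 / 300 = 1500.0
```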
    -

    svm_nfs_destroy_clientid_total

    -

    Total number of DESTROY_CLIENTID operations.

    +

    svm_nfs_remove_total

    +

Total number of Remove procedure requests. It is the total number of Remove success and Remove error requests.

    @@ -54713,32 +60601,56 @@

    svm_nfs_destroy_clientid_totalUnit: none
    Type: rate
    Base: +

    + + + + + + + + + - + - + + + + + + + + + + + + + - + - +
    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4remove.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41destroy_clientid.total
    Unit: none
    Type: rate
    Base:
    remove.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42destroy_clientid.total
    Unit: none
    Type: rate
    Base:
    remove.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3remove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4remove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1destroy_clientid_total
    Unit: none
    Type: rate
    Base:
    remove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2destroy_clientid_total
    Unit: none
    Type: rate
    Base:
    remove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_destroy_session_avg_latency

    -

    Average latency of DESTROY_SESSION operations.

    +

    svm_nfs_rename_avg_latency

    +

    Average latency of Rename procedure requests. The counter keeps track of the average response time of Rename requests.

    @@ -54751,32 +60663,56 @@

    svm_nfs_destroy_session_avg_latency

    + + + + + + + + + + + + - + - + + + + + + + + + + + + + - + - +
    RESTapi/cluster/counter/tables/svm_nfs_v3rename.average_latency
    Unit: microsec
    Type: average
    Base: rename.total
    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4rename.average_latency
    Unit: microsec
    Type: average
    Base: rename.total
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41destroy_session.average_latency
    Unit: microsec
    Type: average
    Base: destroy_session.total
    rename.average_latency
    Unit: microsec
    Type: average
    Base: rename.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42destroy_session.average_latency
    Unit: microsec
    Type: average
    Base: destroy_session.total
    rename.average_latency
    Unit: microsec
    Type: average
    Base: rename.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3rename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4rename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1destroy_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: destroy_session_total
    rename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2destroy_session_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: destroy_session_total
    rename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_destroy_session_total

    -

    Total number of DESTROY_SESSION operations.

    +

    svm_nfs_rename_total

    +

Total number of Rename procedure requests. It is the total number of Rename success and Rename error requests.

    @@ -54789,32 +60725,56 @@

    svm_nfs_destroy_session_totalUnit: none
    Type: rate
    Base: +

    + + + + + + + + + - + - + + + + + + + + + + + + + - + - +
    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4rename.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41destroy_session.total
    Unit: none
    Type: rate
    Base:
    rename.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42destroy_session.total
    Unit: none
    Type: rate
    Base:
    rename.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3rename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4rename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1destroy_session_total
    Unit: none
    Type: rate
    Base:
    rename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2destroy_session_total
    Unit: none
    Type: rate
    Base:
    rename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_exchange_id_avg_latency

    -

    Average latency of EXCHANGE_ID operations.

    +

    svm_nfs_renew_avg_latency

    +

    Average latency of RENEW procedures

    @@ -54827,32 +60787,20 @@

    svm_nfs_exchange_id_avg_latency

    - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41exchange_id.average_latency
    Unit: microsec
    Type: average
    Base: exchange_id.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42exchange_id.average_latency
    Unit: microsec
    Type: average
    Base: exchange_id.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1exchange_id_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: exchange_id_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/svm_nfs_v4renew.average_latency
    Unit: microsec
    Type: average
    Base: renew.total
    conf/restperf/9.12.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_2exchange_id_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: exchange_id_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances nfsv4renew_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: renew_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    -

    svm_nfs_exchange_id_total

    -

    Total number of EXCHANGE_ID operations.

    +

    svm_nfs_renew_total

    +

    Total number of RENEW procedures

    @@ -54865,32 +60813,20 @@

    svm_nfs_exchange_id_totalUnit: none
    Type: rate
    Base: -

    - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42exchange_id.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1exchange_id_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/svm_nfs_v4renew.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_2exchange_id_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances nfsv4renew_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    -

    svm_nfs_free_stateid_avg_latency

    -

    Average latency of FREE_STATEID operations.

    +

    svm_nfs_restorefh_avg_latency

    +

    Average latency of RESTOREFH procedures

    @@ -54903,32 +60839,44 @@

    svm_nfs_free_stateid_avg_latency

    + + + + + + - + - + + + + + + + - + - +
    RESTapi/cluster/counter/tables/svm_nfs_v4restorefh.average_latency
    Unit: microsec
    Type: average
    Base: restorefh.total
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41free_stateid.average_latency
    Unit: microsec
    Type: average
    Base: free_stateid.total
    restorefh.average_latency
    Unit: microsec
    Type: average
    Base: restorefh.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42free_stateid.average_latency
    Unit: microsec
    Type: average
    Base: free_stateid.total
    restorefh.average_latency
    Unit: microsec
    Type: average
    Base: restorefh.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4restorefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: restorefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1free_stateid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: free_stateid_total
    restorefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: restorefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2free_stateid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: free_stateid_total
    restorefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: restorefh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_free_stateid_total

    -

    Total number of FREE_STATEID operations.

    +

    svm_nfs_restorefh_total

    +

    Total number of RESTOREFH procedures

    @@ -54941,32 +60889,44 @@

    svm_nfs_free_stateid_totalUnit: none
    Type: rate
    Base: +

    + + + - + - + + + + + + + - + - +
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41free_stateid.total
    Unit: none
    Type: rate
    Base:
    restorefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42free_stateid.total
    Unit: none
    Type: rate
    Base:
    restorefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4restorefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1free_stateid_total
    Unit: none
    Type: rate
    Base:
    restorefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2free_stateid_total
    Unit: none
    Type: rate
    Base:
    restorefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_fsinfo_avg_latency

    -

    Average latency of FSInfo procedure requests. The counter keeps track of the average response time of FSInfo requests.

    +

    svm_nfs_rmdir_avg_latency

    +

    Average latency of RmDir procedure requests. The counter keeps track of the average response time of RmDir requests.

    @@ -54980,19 +60940,19 @@

    svm_nfs_fsinfo_avg_latencyUnit: microsec
    Type: average
    Base: fsinfo.total +

    - +
    rmdir.average_latency
    Unit: microsec
    Type: average
    Base: rmdir.total
    conf/restperf/9.12.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv3fsinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: fsinfo_total
    rmdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rmdir_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    -

    svm_nfs_fsinfo_total

    -

Total number of FSInfo procedure requests. It is the total number of FSInfo success and FSInfo error requests.

    +

    svm_nfs_rmdir_total

    +

Total number of RmDir procedure requests. It is the total number of RmDir success and RmDir error requests.

    @@ -55006,19 +60966,19 @@

    svm_nfs_fsinfo_totalUnit: none
    Type: rate
    Base: +

    - +
    rmdir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv3fsinfo_total
    Unit: none
    Type: rate
    Base:
    rmdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
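The REST rows in these tables all come from ONTAP's counter-table endpoints (api/cluster/counter/tables/...). A hedged sketch of pulling one such table directly is shown below; the hostname and credentials are placeholders, it assumes an ONTAP release that exposes these tables (9.11.1 or later), uses the third-party requests package, and the exact response layout handled in the loop is an assumption for illustration.

```python
# Sketch: pull the svm_nfs_v3 counter table rows straight from ONTAP's REST API.
# HOST and AUTH are placeholders; verify=False is for lab use only.
import requests

HOST = "cluster.example.com"        # hypothetical cluster management LIF
AUTH = ("admin", "password")        # hypothetical credentials

url = f"https://{HOST}/api/cluster/counter/tables/svm_nfs_v3/rows"
resp = requests.get(url, params={"fields": "counters"}, auth=AUTH, verify=False)
resp.raise_for_status()

for row in resp.json().get("records", []):
    # Assumed layout: each record carries a list of {"name": ..., "value": ...} counters.
    counters = {c["name"]: c.get("value") for c in row.get("counters", [])}
    if "rmdir.total" in counters:
        print(row.get("id"), "rmdir.total =", counters["rmdir.total"])
```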
    -

    svm_nfs_fsstat_avg_latency

    -

    Average latency of FSStat procedure requests. The counter keeps track of the average response time of FSStat requests.

    +

    svm_nfs_savefh_avg_latency

    +

    Average latency of SAVEFH procedures

    @@ -55031,46 +60991,44 @@

    svm_nfs_fsstat_avg_latencyUnit: microsec
    Type: average
    Base: fsstat.total -

    + + + - - - - + + + + - -
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/svm_nfs_v4savefh.average_latency
    Unit: microsec
    Type: average
    Base: savefh.total
    conf/restperf/9.12.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv3fsstat_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: fsstat_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlRESTapi/cluster/counter/tables/svm_nfs_v41savefh.average_latency
    Unit: microsec
    Type: average
    Base: savefh.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    -

    svm_nfs_fsstat_total

    -

Total number of FSStat procedure requests. It is the total number of FSStat success and FSStat error requests.

    - - - - - - + + + + - - - - - - + + + + - - - + + + + + + + + +
    APIEndpointMetricTemplateRESTapi/cluster/counter/tables/svm_nfs_v42savefh.average_latency
    Unit: microsec
    Type: average
    Base: savefh.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v3fsstat.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yamlZAPIperf-object-get-instances nfsv4savefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: savefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv3fsstat_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances nfsv4_1savefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: savefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPIperf-object-get-instances nfsv4_2savefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: savefh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_get_dir_delegation_avg_latency

    -

    Average latency of GET_DIR_DELEGATION operations.

    +

    svm_nfs_savefh_total

    +

    Total number of SAVEFH procedures

    @@ -55083,32 +61041,44 @@

svm_nfs_get_dir_delegation_avg_latency

    + + + + + + - + - + + + + + + + - + - +
    RESTapi/cluster/counter/tables/svm_nfs_v4savefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41get_dir_delegation.average_latency
    Unit: microsec
    Type: average
    Base: get_dir_delegation.total
    savefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42get_dir_delegation.average_latency
    Unit: microsec
    Type: average
    Base: get_dir_delegation.total
    savefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4savefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1get_dir_delegation_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: get_dir_delegation_total
    savefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2get_dir_delegation_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: get_dir_delegation_total
    savefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_get_dir_delegation_total

    -

    Total number of GET_DIR_DELEGATION operations.

    +

    svm_nfs_secinfo_avg_latency

    +

    Average latency of SECINFO procedures

    @@ -55121,32 +61091,44 @@

    svm_nfs_get_dir_delegation_total

    + + + + + + - + - + + + + + + + - + - +
    RESTapi/cluster/counter/tables/svm_nfs_v4secinfo.average_latency
    Unit: microsec
    Type: average
    Base: secinfo.total
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41get_dir_delegation.total
    Unit: none
    Type: rate
    Base:
    secinfo.average_latency
    Unit: microsec
    Type: average
    Base: secinfo.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42get_dir_delegation.total
    Unit: none
    Type: rate
    Base:
    secinfo.average_latency
    Unit: microsec
    Type: average
    Base: secinfo.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4secinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1get_dir_delegation_total
    Unit: none
    Type: rate
    Base:
    secinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2get_dir_delegation_total
    Unit: none
    Type: rate
    Base:
    secinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_getattr_avg_latency

    -

    Average latency of GetAttr procedure requests. This counter keeps track of the average response time of GetAttr requests.

    +

    svm_nfs_secinfo_no_name_avg_latency

    +

    Average latency of SECINFO_NO_NAME operations.

    @@ -55159,56 +61141,32 @@

    svm_nfs_getattr_avg_latencyUnit: microsec
    Type: average
    Base: getattr.total -

    - - - - - - - - - - + - + - - - - - - - - - - - - - + - +
    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4getattr.average_latency
    Unit: microsec
    Type: average
    Base: getattr.total
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41getattr.average_latency
    Unit: microsec
    Type: average
    Base: getattr.total
    secinfo_no_name.average_latency
    Unit: microsec
    Type: average
    Base: secinfo_no_name.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42getattr.average_latency
    Unit: microsec
    Type: average
    Base: getattr.total
    secinfo_no_name.average_latency
    Unit: microsec
    Type: average
    Base: secinfo_no_name.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3getattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getattr_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4getattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1getattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getattr_total
    secinfo_no_name_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_no_name_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2getattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getattr_total
    secinfo_no_name_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_no_name_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_getattr_total

    -

    Total number of Getattr procedure requests. It is the total number of getattr success and getattr error requests.

    +

    svm_nfs_secinfo_no_name_total

    +

    Total number of SECINFO_NO_NAME operations.

    @@ -55221,56 +61179,32 @@

    svm_nfs_getattr_totalUnit: none
    Type: rate
    Base: -

    - - - - - - - - - - + - + - - - - - - - - - - - - - + - +
    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4getattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41getattr.total
    Unit: none
    Type: rate
    Base:
    secinfo_no_name.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42getattr.total
    Unit: none
    Type: rate
    Base:
    secinfo_no_name.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3getattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4getattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1getattr_total
    Unit: none
    Type: rate
    Base:
    secinfo_no_name_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2getattr_total
    Unit: none
    Type: rate
    Base:
    secinfo_no_name_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_getdeviceinfo_avg_latency

    -

    Average latency of GETDEVICEINFO operations.

    +

    svm_nfs_secinfo_total

    +

    Total number of SECINFO procedures

    @@ -55283,32 +61217,44 @@

    svm_nfs_getdeviceinfo_avg_latency

    + + + + + + - + - + + + + + + + - + - +
    RESTapi/cluster/counter/tables/svm_nfs_v4secinfo.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41getdeviceinfo.average_latency
    Unit: microsec
    Type: average
    Base: getdeviceinfo.total
    secinfo.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42getdeviceinfo.average_latency
    Unit: microsec
    Type: average
    Base: getdeviceinfo.total
    secinfo.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4secinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1getdeviceinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getdeviceinfo_total
    secinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2getdeviceinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getdeviceinfo_total
    secinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_getdeviceinfo_total

    -

    Total number of GETDEVICEINFO operations.

    +

    svm_nfs_sequence_avg_latency

    +

    Average latency of SEQUENCE operations.

    @@ -55322,31 +61268,31 @@

    svm_nfs_getdeviceinfo_totalUnit: none
    Type: rate
    Base: +

    - + - + - +
    sequence.average_latency
    Unit: microsec
    Type: average
    Base: sequence.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42getdeviceinfo.total
    Unit: none
    Type: rate
    Base:
    sequence.average_latency
    Unit: microsec
    Type: average
    Base: sequence.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPI perf-object-get-instances nfsv4_1getdeviceinfo_total
    Unit: none
    Type: rate
    Base:
    sequence_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: sequence_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2getdeviceinfo_total
    Unit: none
    Type: rate
    Base:
    sequence_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: sequence_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_getdevicelist_avg_latency

    -

    Average latency of GETDEVICELIST operations.

    +

    svm_nfs_sequence_total

    +

    Total number of SEQUENCE operations.

    @@ -55360,31 +61306,31 @@

    svm_nfs_getdevicelist_avg_latency

    - + - + - + - +
    REST api/cluster/counter/tables/svm_nfs_v41getdevicelist.average_latency
    Unit: microsec
    Type: average
    Base: getdevicelist.total
    sequence.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42getdevicelist.average_latency
    Unit: microsec
    Type: average
    Base: getdevicelist.total
    sequence.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPI perf-object-get-instances nfsv4_1getdevicelist_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getdevicelist_total
    sequence_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2getdevicelist_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getdevicelist_total
    sequence_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_getdevicelist_total

    -

    Total number of GETDEVICELIST operations.

    +

    svm_nfs_set_ssv_avg_latency

    +

    Average latency of SET_SSV operations.

    @@ -55398,31 +61344,31 @@

    svm_nfs_getdevicelist_totalUnit: none
    Type: rate
    Base: +

    - + - + - +
    set_ssv.average_latency
    Unit: microsec
    Type: average
    Base: set_ssv.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42getdevicelist.total
    Unit: none
    Type: rate
    Base:
    set_ssv.average_latency
    Unit: microsec
    Type: average
    Base: set_ssv.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPI perf-object-get-instances nfsv4_1getdevicelist_total
    Unit: none
    Type: rate
    Base:
    set_ssv_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: set_ssv_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2getdevicelist_total
    Unit: none
    Type: rate
    Base:
    set_ssv_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: set_ssv_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_getfh_avg_latency

    -

    Average latency of GETFH procedures

    +

    svm_nfs_set_ssv_total

    +

    Total number of SET_SSV operations.

    @@ -55435,44 +61381,32 @@

    svm_nfs_getfh_avg_latencyUnit: microsec
    Type: average
    Base: getfh.total -

    - - - - + - - - - - - - - + + - + - +
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41getfh.average_latency
    Unit: microsec
    Type: average
    Base: getfh.total
    set_ssv.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42getfh.average_latency
    Unit: microsec
    Type: average
    Base: getfh.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4getfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlset_ssv.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPI perf-object-get-instances nfsv4_1getfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getfh_total
    set_ssv_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2getfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: getfh_total
    set_ssv_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_getfh_total

    -

    Total number of GETFH procedures

    +

    svm_nfs_setattr_avg_latency

    +

    Average latency of SetAttr procedure requests. The counter keeps track of the average response time of SetAttr requests.

    @@ -55485,44 +61419,56 @@

    svm_nfs_getfh_totalUnit: microsec
    Type: average
    Base: setattr.total +

    + + + - + - + - + + + + + + + - + - + - +
    conf/restperf/9.12.0/nfsv3.yaml
    REST api/cluster/counter/tables/svm_nfs_v4getfh.total
    Unit: none
    Type: rate
    Base:
    setattr.average_latency
    Unit: microsec
    Type: average
    Base: setattr.total
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41getfh.total
    Unit: none
    Type: rate
    Base:
    setattr.average_latency
    Unit: microsec
    Type: average
    Base: setattr.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42getfh.total
    Unit: none
    Type: rate
    Base:
    setattr.average_latency
    Unit: microsec
    Type: average
    Base: setattr.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3setattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv4getfh_total
    Unit: none
    Type: rate
    Base:
    setattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1getfh_total
    Unit: none
    Type: rate
    Base:
    setattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2getfh_total
    Unit: none
    Type: rate
    Base:
    setattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_latency

    -

    Average latency of NFSv3 requests. This counter keeps track of the average response time of NFSv3 requests.

    +

    svm_nfs_setattr_total

    +

    Total number of Setattr procedure requests. It is the total number of Setattr success and setattr error requests.

    @@ -55536,55 +61482,55 @@

    svm_nfs_latencyUnit: microsec
    Type: average
    Base: total_ops +

    - + - + - + - + - + - + - +
    setattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yaml
    REST api/cluster/counter/tables/svm_nfs_v4latency
    Unit: microsec
    Type: average
    Base: total_ops
    setattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41latency
    Unit: microsec
    Type: average
    Base: total_ops
    setattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42latency
    Unit: microsec
    Type: average
    Base: total_ops
    setattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPI perf-object-get-instances nfsv3latency
    Unit: microsec
    Type: average,no-zero-values
    Base: total_ops
    setattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv4latency
    Unit: microsec
    Type: average,no-zero-values
    Base: total_ops
    setattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1latency
    Unit: microsec
    Type: average,no-zero-values
    Base: total_ops
    setattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2latency
    Unit: microsec
    Type: average,no-zero-values
    Base: total_ops
    setattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
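Once Harvest has collected these counters, svm_nfs_latency is exposed as an ordinary time series, so it can be read back through the Prometheus HTTP API. The sketch below assumes Harvest is exporting to a Prometheus server at a placeholder address; the svm label used in the output loop is the usual Harvest label but should be verified against your own installation.

```python
# Sketch: read the exported svm_nfs_latency series from a Prometheus server.
# PROM_URL is a placeholder; adjust the labels to match your Harvest deployment.
import requests

PROM_URL = "http://localhost:9090"   # hypothetical Prometheus address

resp = requests.get(
    f"{PROM_URL}/api/v1/query",
    params={"query": "svm_nfs_latency"},
    timeout=10,
)
resp.raise_for_status()

for sample in resp.json()["data"]["result"]:
    labels = sample["metric"]
    _, value = sample["value"]          # instant vector sample: [timestamp, value-as-string]
    print(f'{labels.get("svm", "?")}: {float(value):.0f} microsec')
```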
    -

    svm_nfs_layoutcommit_avg_latency

    -

    Average latency of LAYOUTCOMMIT operations.

    +

    svm_nfs_setclientid_avg_latency

    +

    Average latency of SETCLIENTID procedures

    @@ -55597,32 +61543,20 @@

    svm_nfs_layoutcommit_avg_latency

    - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41layoutcommit.average_latency
    Unit: microsec
    Type: average
    Base: layoutcommit.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42layoutcommit.average_latency
    Unit: microsec
    Type: average
    Base: layoutcommit.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1layoutcommit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutcommit_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/svm_nfs_v4setclientid.average_latency
    Unit: microsec
    Type: average
    Base: setclientid.total
    conf/restperf/9.12.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_2layoutcommit_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutcommit_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances nfsv4setclientid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setclientid_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    -

    svm_nfs_layoutcommit_total

    -

    Total number of LAYOUTCOMMIT operations.

    +

    svm_nfs_setclientid_confirm_avg_latency

    +

    Average latency of SETCLIENTID_CONFIRM procedures

    @@ -55635,32 +61569,20 @@

    svm_nfs_layoutcommit_totalUnit: none
    Type: rate
    Base: -

    - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42layoutcommit.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1layoutcommit_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/svm_nfs_v4setclientid_confirm.average_latency
    Unit: microsec
    Type: average
    Base: setclientid_confirm.total
    conf/restperf/9.12.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_2layoutcommit_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances nfsv4setclientid_confirm_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setclientid_confirm_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    -

    svm_nfs_layoutget_avg_latency

    -

    Average latency of LAYOUTGET operations.

    +

    svm_nfs_setclientid_confirm_total

    +

    Total number of SETCLIENTID_CONFIRM procedures

    @@ -55673,32 +61595,46 @@

    svm_nfs_layoutget_avg_latencyUnit: microsec
    Type: average
    Base: layoutget.total -

    + + + - - - - + + + + + +
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/svm_nfs_v4setclientid_confirm.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42layoutget.average_latency
    Unit: microsec
    Type: average
    Base: layoutget.total
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances nfsv4setclientid_confirm_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    +

    svm_nfs_setclientid_total

    +

    Total number of SETCLIENTID procedures

    + + - - - - + + + + + + + + + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4_1layoutget_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutget_total
conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
API Endpoint Metric Template
    RESTapi/cluster/counter/tables/svm_nfs_v4setclientid.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_2layoutget_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutget_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances nfsv4setclientid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    -

    svm_nfs_layoutget_total

    -

    Total number of LAYOUTGET operations.

    + +

    Average latency of SymLink procedure requests. The counter keeps track of the average response time of SymLink requests.

    @@ -55711,32 +61647,46 @@

    svm_nfs_layoutget_total

    - - - + + + - - - - + + + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41layoutget.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/svm_nfs_v3symlink.average_latency
    Unit: microsec
    Type: average
    Base: symlink.total
    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42layoutget.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances nfsv3symlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: symlink_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    + +

Total number of SymLink procedure requests. It is the total number of SymLink success and SymLink error requests.

    + + + + + + + + + - - - - + + + + - - - + + +
API Endpoint Metric Template
    ZAPIperf-object-get-instances nfsv4_1layoutget_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/svm_nfs_v3symlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4_2layoutget_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances nfsv3symlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    -

    svm_nfs_layoutreturn_avg_latency

    -

    Average latency of LAYOUTRETURN operations.

    +

    svm_nfs_test_stateid_avg_latency

    +

    Average latency of TEST_STATEID operations.

    @@ -55750,31 +61700,31 @@

    svm_nfs_layoutreturn_avg_latency

    - + - + - + - +
    REST api/cluster/counter/tables/svm_nfs_v41layoutreturn.average_latency
    Unit: microsec
    Type: average
    Base: layoutreturn.total
    test_stateid.average_latency
    Unit: microsec
    Type: average
    Base: test_stateid.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42layoutreturn.average_latency
    Unit: microsec
    Type: average
    Base: layoutreturn.total
    test_stateid.average_latency
    Unit: microsec
    Type: average
    Base: test_stateid.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPI perf-object-get-instances nfsv4_1layoutreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutreturn_total
    test_stateid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: test_stateid_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2layoutreturn_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: layoutreturn_total
    test_stateid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: test_stateid_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_layoutreturn_total

    -

    Total number of LAYOUTRETURN operations.

    +

    svm_nfs_test_stateid_total

    +

    Total number of TEST_STATEID operations.

    @@ -55788,31 +61738,31 @@

    svm_nfs_layoutreturn_totalUnit: none
    Type: rate
    Base: +

    - + - + - +
    test_stateid.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42layoutreturn.total
    Unit: none
    Type: rate
    Base:
    test_stateid.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPI perf-object-get-instances nfsv4_1layoutreturn_total
    Unit: none
    Type: rate
    Base:
    test_stateid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2layoutreturn_total
    Unit: none
    Type: rate
    Base:
    test_stateid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    - -

    Average latency of Link procedure requests. The counter keeps track of the average response time of Link requests.

    +

    svm_nfs_throughput

    +

    Rate of NFSv3 data transfers per second.

    @@ -55826,55 +61776,55 @@ - + - + - + - + - + - + - + - +
    REST api/cluster/counter/tables/svm_nfs_v3link.average_latency
    Unit: microsec
    Type: average
    Base: link.total
    throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yaml
    REST api/cluster/counter/tables/svm_nfs_v4link.average_latency
    Unit: microsec
    Type: average
    Base: link.total
    total.throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41link.average_latency
    Unit: microsec
    Type: average
    Base: link.total
    total.write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42link.average_latency
    Unit: microsec
    Type: average
    Base: link.total
    total.write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPI perf-object-get-instances nfsv3link_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: link_total
    nfsv3_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv4link_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: link_total
    nfs4_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1link_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: link_total
    nfs41_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2link_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: link_total
    nfs42_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
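The throughput rows above are reported in b_per_sec (bytes per second), so dashboard-friendly units are a single division away. A tiny sketch with an invented sample value:

```python
# Sketch: convert a svm_nfs_throughput sample from b_per_sec to MiB/s.
# The sample value is invented for illustration.
throughput_b_per_sec = 83_886_080                      # hypothetical sample
throughput_mib_per_sec = throughput_b_per_sec / (1024 * 1024)
print(f"{throughput_mib_per_sec:.1f} MiB/s")           # 80.0 MiB/s
```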
    - -

Total number of Link procedure requests. It is the total number of Link success and Link error requests.

    +

    svm_nfs_verify_avg_latency

    +

    Average latency of VERIFY procedures

    @@ -55887,56 +61837,44 @@ - - - - + - + - + - - - - - - - + - + - +
    conf/restperf/9.12.0/nfsv3.yaml
    REST api/cluster/counter/tables/svm_nfs_v4link.total
    Unit: none
    Type: rate
    Base:
    verify.average_latency
    Unit: microsec
    Type: average
    Base: verify.total
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41link.total
    Unit: none
    Type: rate
    Base:
    verify.average_latency
    Unit: microsec
    Type: average
    Base: verify.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42link.total
    Unit: none
    Type: rate
    Base:
    verify.average_latency
    Unit: microsec
    Type: average
    Base: verify.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3link_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv4link_total
    Unit: none
    Type: rate
    Base:
    verify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: verify_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1link_total
    Unit: none
    Type: rate
    Base:
    verify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: verify_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2link_total
    Unit: none
    Type: rate
    Base:
    verify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: verify_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_lock_avg_latency

    -

    Average latency of LOCK procedures

    +

    svm_nfs_verify_total

    +

    Total number of VERIFY procedures

    @@ -55950,43 +61888,43 @@

    svm_nfs_lock_avg_latency

    - + - + - + - + - + - +
    REST api/cluster/counter/tables/svm_nfs_v4lock.average_latency
    Unit: microsec
    Type: average
    Base: lock.total
    verify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41lock.average_latency
    Unit: microsec
    Type: average
    Base: lock.total
    verify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42lock.average_latency
    Unit: microsec
    Type: average
    Base: lock.total
    verify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPI perf-object-get-instances nfsv4lock_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lock_total
    verify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1lock_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lock_total
    verify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2lock_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lock_total
    verify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_lock_total

    -

    Total number of LOCK procedures

    +

    svm_nfs_want_delegation_avg_latency

    +

    Average latency of WANT_DELEGATION operations.

    @@ -55999,44 +61937,32 @@

    svm_nfs_lock_totalUnit: none
    Type: rate
    Base: -

    - - - - + - + - - - - - - - + - +
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41lock.total
    Unit: none
    Type: rate
    Base:
    want_delegation.average_latency
    Unit: microsec
    Type: average
    Base: want_delegation.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42lock.total
    Unit: none
    Type: rate
    Base:
    want_delegation.average_latency
    Unit: microsec
    Type: average
    Base: want_delegation.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4lock_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1lock_total
    Unit: none
    Type: rate
    Base:
    want_delegation_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: want_delegation_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2lock_total
    Unit: none
    Type: rate
    Base:
    want_delegation_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: want_delegation_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_lockt_avg_latency

    -

    Average latency of LOCKT procedures

    +

    svm_nfs_want_delegation_total

    +

    Total number of WANT_DELEGATION operations.

    @@ -56049,44 +61975,32 @@

    svm_nfs_lockt_avg_latencyUnit: microsec
    Type: average
    Base: lockt.total -

    - - - - + - + - - - - - - - + - +
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41lockt.average_latency
    Unit: microsec
    Type: average
    Base: lockt.total
    want_delegation.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42lockt.average_latency
    Unit: microsec
    Type: average
    Base: lockt.total
    want_delegation.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4lockt_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lockt_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1lockt_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lockt_total
    want_delegation_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2lockt_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lockt_total
    want_delegation_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_lockt_total

    -

    Total number of LOCKT procedures

    +

    svm_nfs_write_avg_latency

    +

    Average latency of Write procedure requests. The counter keeps track of the average response time of Write requests.

    @@ -56099,44 +62013,82 @@

svm_nfs_lockt_total
Unit: microsec
Type: average
Base: write.total

    conf/restperf/9.12.0/nfsv3.yaml
    REST api/cluster/counter/tables/svm_nfs_v4lockt.total
    Unit: none
    Type: rate
    Base:
    write.average_latency
    Unit: microsec
    Type: average
    Base: write.total
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41lockt.total
    Unit: none
    Type: rate
    Base:
    write.average_latency
    Unit: microsec
    Type: average
    Base: write.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42lockt.total
    Unit: none
    Type: rate
    Base:
    write.average_latency
    Unit: microsec
    Type: average
    Base: write.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3write_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv4lockt_total
    Unit: none
    Type: rate
    Base:
    write_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1lockt_total
    Unit: none
    Type: rate
    Base:
    write_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2lockt_total
    Unit: none
    Type: rate
    Base:
    write_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
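
If these counters are scraped into Prometheus by a Harvest poller (an assumption; the URL and the svm label below are illustrative, not taken from this document), a quick way to spot the slowest writers might look like this sketch:

```python
# Hypothetical query sketch, assuming svm_nfs_write_avg_latency is exported to
# Prometheus and carries an "svm" label; the address and label name are
# assumptions for illustration only.
import json
import urllib.parse
import urllib.request

PROM_URL = "http://localhost:9090"  # assumed Prometheus address

def instant_query(expr: str) -> dict:
    """Run an instant query against the Prometheus HTTP API."""
    qs = urllib.parse.urlencode({"query": expr})
    with urllib.request.urlopen(f"{PROM_URL}/api/v1/query?{qs}") as resp:
        return json.load(resp)

# Five SVMs with the highest NFS write latency right now.
result = instant_query("topk(5, svm_nfs_write_avg_latency)")
for series in result["data"]["result"]:
    print(series["metric"].get("svm", "?"), series["value"][1])
```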
    -

    svm_nfs_locku_avg_latency

    -

    Average latency of LOCKU procedures

    +

    svm_nfs_write_ops

    +

    Total observed NFSv3 write operations per second.

API Endpoint Metric Template
REST api/cluster/counter/tables/svm_nfs_v3 write_ops
Unit: per_sec
Type: rate
Base:
conf/restperf/9.12.0/nfsv3.yaml
ZAPI perf-object-get-instances nfsv3 nfsv3_write_ops
Unit: per_sec
Type: rate,no-zero-values
Base:
conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    +

    svm_nfs_write_throughput

    +

    Rate of NFSv3 write data transfers per second.

    @@ -56149,44 +62101,56 @@

svm_nfs_locku_avg_latency
Unit: b_per_sec
Type: rate
Base:

    conf/restperf/9.12.0/nfsv3.yaml
    REST api/cluster/counter/tables/svm_nfs_v4locku.average_latency
    Unit: microsec
    Type: average
    Base: locku.total
    total.write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41locku.average_latency
    Unit: microsec
    Type: average
    Base: locku.total
    total.throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42locku.average_latency
    Unit: microsec
    Type: average
    Base: locku.total
    total.throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3nfsv3_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv4locku_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: locku_total
    nfs4_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1locku_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: locku_total
    nfs41_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2locku_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: locku_total
    nfs42_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
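
Because svm_nfs_write_throughput (b_per_sec) and svm_nfs_write_ops (per_sec) describe the same sample interval, dividing one by the other gives a rough average write size. A small sketch with invented values:

```python
# Back-of-the-envelope sketch: combining svm_nfs_write_throughput (b_per_sec)
# with svm_nfs_write_ops (per_sec) gives an approximate average write size.
# The sample values are invented for illustration.

def avg_write_size_kib(throughput_b_per_sec: float, write_ops_per_sec: float) -> float:
    """Approximate average NFSv3 write size in KiB for one sample interval."""
    if write_ops_per_sec == 0:
        return 0.0
    return throughput_b_per_sec / write_ops_per_sec / 1024

print(avg_write_size_kib(52_428_800, 1_600))  # -> 32.0 (KiB per write)
```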
    -

    svm_nfs_locku_total

    -

    Total number of LOCKU procedures

    +

    svm_nfs_write_total

    +

    Total number of Write procedure requests. It is the total number of write success and write error requests.

    @@ -56199,44 +62163,56 @@

svm_nfs_locku_total
Unit: none
Type: rate
Base:

    conf/restperf/9.12.0/nfsv3.yaml
    REST api/cluster/counter/tables/svm_nfs_v4locku.total
    Unit: none
    Type: rate
    Base:
    write.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    REST api/cluster/counter/tables/svm_nfs_v41locku.total
    Unit: none
    Type: rate
    Base:
    write.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    REST api/cluster/counter/tables/svm_nfs_v42locku.total
    Unit: none
    Type: rate
    Base:
    write.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3write_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPI perf-object-get-instances nfsv4locku_total
    Unit: none
    Type: rate
    Base:
    write_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPI perf-object-get-instances nfsv4_1locku_total
    Unit: none
    Type: rate
    Base:
    write_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
    ZAPI perf-object-get-instances nfsv4_2locku_total
    Unit: none
    Type: rate
    Base:
    write_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
    -

    svm_nfs_lookup_avg_latency

    -

    Average latency of LookUp procedure requests. This shows the average time it takes for the LookUp operation to reply to the request.

    +

    svm_ontaps3_svm_abort_multipart_upload_failed

    +

    Number of failed Abort Multipart Upload operations. svm_ontaps3_svm_abort_multipart_upload_failed is ontaps3_svm_abort_multipart_upload_failed aggregated by svm.

    @@ -56249,56 +62225,72 @@

svm_nfs_lookup_avg_latency
Unit: microsec
Type: average
Base: lookup.total

    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverabort_multipart_upload_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4lookup.average_latency
    Unit: microsec
    Type: average
    Base: lookup.total
    conf/restperf/9.12.0/nfsv4.yamlZAPIperf-object-get-instances object_store_serverabort_multipart_upload_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_abort_multipart_upload_failed_client_close

    +

    Number of times Abort Multipart Upload operation failed because client terminated connection while the operation was still pending on server. svm_ontaps3_svm_abort_multipart_upload_failed_client_close is ontaps3_svm_abort_multipart_upload_failed_client_close aggregated by svm.

    RESTapi/cluster/counter/tables/svm_nfs_v41lookup.average_latency
    Unit: microsec
    Type: average
    Base: lookup.total
    conf/restperf/9.12.0/nfsv4_1.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v42lookup.average_latency
    Unit: microsec
    Type: average
    Base: lookup.total
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_serverabort_multipart_upload_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3lookup_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookup_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverabort_multipart_upload_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_abort_multipart_upload_latency

    +

    Average latency for Abort Multipart Upload operations. svm_ontaps3_svm_abort_multipart_upload_latency is ontaps3_svm_abort_multipart_upload_latency aggregated by svm.

    ZAPIperf-object-get-instances nfsv4lookup_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookup_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1lookup_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookup_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverabort_multipart_upload_latency
    Unit: microsec
    Type: average
    Base: abort_multipart_upload_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2lookup_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookup_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverabort_multipart_upload_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: abort_multipart_upload_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
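
Each svm_ontaps3_svm_* metric in this section is described as the corresponding ontaps3_svm_* metric "aggregated by svm". A minimal sketch of that roll-up, with invented per-instance records (field and instance names are assumptions):

```python
# Minimal sketch of the "aggregated by svm" relationship described above:
# per-instance ontaps3_svm_* samples are summed per SVM. The records below
# are invented; only the aggregation idea comes from the descriptions.
from collections import defaultdict

samples = [
    {"svm": "svm1", "instance": "bucket_a", "abort_multipart_upload_failed": 2},
    {"svm": "svm1", "instance": "bucket_b", "abort_multipart_upload_failed": 1},
    {"svm": "svm2", "instance": "bucket_c", "abort_multipart_upload_failed": 4},
]

per_svm = defaultdict(int)
for s in samples:
    per_svm[s["svm"]] += s["abort_multipart_upload_failed"]

print(dict(per_svm))  # {'svm1': 3, 'svm2': 4}
```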
    -

    svm_nfs_lookup_total

    -

    Total number of Lookup procedure requests. It is the total number of lookup success and lookup error requests.

    +

    svm_ontaps3_svm_abort_multipart_upload_rate

    +

    Number of Abort Multipart Upload operations per second. svm_ontaps3_svm_abort_multipart_upload_rate is ontaps3_svm_abort_multipart_upload_rate aggregated by svm.

    @@ -56311,56 +62303,72 @@

svm_nfs_lookup_total
Unit: none
Type: rate
Base:

    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverabort_multipart_upload_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4lookup.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yamlZAPIperf-object-get-instances object_store_serverabort_multipart_upload_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_abort_multipart_upload_total

    +

    Number of Abort Multipart Upload operations. svm_ontaps3_svm_abort_multipart_upload_total is ontaps3_svm_abort_multipart_upload_total aggregated by svm.

    RESTapi/cluster/counter/tables/svm_nfs_v41lookup.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v42lookup.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_serverabort_multipart_upload_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3lookup_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverabort_multipart_upload_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_allow_access

    +

    Number of times access was allowed. svm_ontaps3_svm_allow_access is ontaps3_svm_allow_access aggregated by svm.

    ZAPIperf-object-get-instances nfsv4lookup_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1lookup_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverallow_access
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2lookup_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverallow_access
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_lookupp_avg_latency

    -

    Average latency of LOOKUPP procedures

    +

    svm_ontaps3_svm_anonymous_access

    +

    Number of times anonymous access was allowed. svm_ontaps3_svm_anonymous_access is ontaps3_svm_anonymous_access aggregated by svm.

    @@ -56373,44 +62381,20 @@

svm_nfs_lookupp_avg_latency
Unit: microsec
Type: average
Base: lookupp.total

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41lookupp.average_latency
    Unit: microsec
    Type: average
    Base: lookupp.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42lookupp.average_latency
    Unit: microsec
    Type: average
    Base: lookupp.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4lookupp_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookupp_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1lookupp_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookupp_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serveranonymous_access
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2lookupp_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: lookupp_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serveranonymous_access
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_lookupp_total

    -

    Total number of LOOKUPP procedures

    +

    svm_ontaps3_svm_anonymous_deny_access

    +

    Number of times anonymous access was denied. svm_ontaps3_svm_anonymous_deny_access is ontaps3_svm_anonymous_deny_access aggregated by svm.

    @@ -56423,44 +62407,46 @@

svm_nfs_lookupp_total
Unit: none
Type: rate
Base:

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41lookupp.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serveranonymous_deny_access
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42lookupp.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_serveranonymous_deny_access
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_authentication_failures

    +

    Number of authentication failures. svm_ontaps3_svm_authentication_failures is ontaps3_svm_authentication_failures aggregated by svm.

    ZAPIperf-object-get-instances nfsv4lookupp_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1lookupp_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverauthentication_failures
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2lookupp_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverauthentication_failures
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_mkdir_avg_latency

    -

    Average latency of MkDir procedure requests. The counter keeps track of the average response time of MkDir requests.

    +

    svm_ontaps3_svm_chunked_upload_reqs

    +

    Total number of object store server chunked object upload requests. svm_ontaps3_svm_chunked_upload_reqs is ontaps3_svm_chunked_upload_reqs aggregated by svm.

    @@ -56473,20 +62459,20 @@

svm_nfs_mkdir_avg_latency
Unit: microsec
Type: average
Base: mkdir.total

    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverchunked_upload_requests
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3mkdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: mkdir_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverchunked_upload_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_mkdir_total

    -

Total number of MkDir procedure requests. It is the total number of MkDir success and MkDir error requests.

    +

    svm_ontaps3_svm_complete_multipart_upload_failed

    +

    Number of failed Complete Multipart Upload operations. svm_ontaps3_svm_complete_multipart_upload_failed is ontaps3_svm_complete_multipart_upload_failed aggregated by svm.

    @@ -56499,20 +62485,20 @@

svm_nfs_mkdir_total
Unit: none
Type: rate
Base:

    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_servercomplete_multipart_upload_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3mkdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_servercomplete_multipart_upload_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_mknod_avg_latency

    -

    Average latency of MkNod procedure requests. The counter keeps track of the average response time of MkNod requests.

    +

    svm_ontaps3_svm_complete_multipart_upload_failed_client_close

    +

    Number of times Complete Multipart Upload operation failed because client terminated connection while the operation was still pending on server. svm_ontaps3_svm_complete_multipart_upload_failed_client_close is ontaps3_svm_complete_multipart_upload_failed_client_close aggregated by svm.

    @@ -56525,20 +62511,20 @@

svm_nfs_mknod_avg_latency
Unit: microsec
Type: average
Base: mknod.total

    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_servercomplete_multipart_upload_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3mknod_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: mknod_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_servercomplete_multipart_upload_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_mknod_total

    -

Total number of MkNod procedure requests. It is the total number of MkNod success and MkNod error requests.

    +

    svm_ontaps3_svm_complete_multipart_upload_latency

    +

    Average latency for Complete Multipart Upload operations. svm_ontaps3_svm_complete_multipart_upload_latency is ontaps3_svm_complete_multipart_upload_latency aggregated by svm.

    @@ -56551,20 +62537,20 @@

svm_nfs_mknod_total
Unit: none
Type: rate
Base:

    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_servercomplete_multipart_upload_latency
    Unit: microsec
    Type: average
    Base: complete_multipart_upload_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3mknod_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_servercomplete_multipart_upload_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: complete_multipart_upload_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_null_avg_latency

    -

    Average latency of Null procedure requests.

    +

    svm_ontaps3_svm_complete_multipart_upload_rate

    +

    Number of Complete Multipart Upload operations per second. svm_ontaps3_svm_complete_multipart_upload_rate is ontaps3_svm_complete_multipart_upload_rate aggregated by svm.

    @@ -56577,56 +62563,72 @@

    svm_nfs_null_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v3null.average_latency
    Unit: microsec
    Type: average
    Base: null.total
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_servercomplete_multipart_upload_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4null.average_latency
    Unit: microsec
    Type: average
    Base: null.total
    conf/restperf/9.12.0/nfsv4.yamlZAPIperf-object-get-instances object_store_servercomplete_multipart_upload_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_complete_multipart_upload_total

    +

    Number of Complete Multipart Upload operations. svm_ontaps3_svm_complete_multipart_upload_total is ontaps3_svm_complete_multipart_upload_total aggregated by svm.

    RESTapi/cluster/counter/tables/svm_nfs_v41null.average_latency
    Unit: microsec
    Type: average
    Base: null.total
    conf/restperf/9.12.0/nfsv4_1.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v42null.average_latency
    Unit: microsec
    Type: average
    Base: null.total
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_servercomplete_multipart_upload_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3null_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_servercomplete_multipart_upload_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_connected_connections

    +

    Number of object store server connections currently established. svm_ontaps3_svm_connected_connections is ontaps3_svm_connected_connections aggregated by svm.

    ZAPIperf-object-get-instances nfsv4null_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1null_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverconnected_connections
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2null_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: null_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverconnected_connections
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
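
The Type column above distinguishes raw counters (reported as-is, e.g. connected_connections) from delta counters (the change since the previous poll, e.g. connections). A small sketch, assuming a fixed poll interval and using invented values, of how a delta can be read as a per-second rate:

```python
# Sketch of the counter "Type" column above: a raw counter (connected_connections)
# is reported unchanged, while a delta counter (connections) is the change since
# the previous poll and can be turned into an approximate per-second rate.
# The values and the 60-second poll interval are invented.

def delta_to_rate(delta_value: float, poll_interval_seconds: float) -> float:
    """Convert a delta-type counter into an approximate per-second rate."""
    return delta_value / poll_interval_seconds

connected_now = 42            # raw: current number of established connections
new_connections_delta = 180   # delta: connections opened since the last poll

print(connected_now)                              # raw value, reported as-is
print(delta_to_rate(new_connections_delta, 60))   # -> 3.0 new connections/second
```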
    -

    svm_nfs_null_total

    -

    Total number of Null procedure requests. It is the total of null success and null error requests.

    +

    svm_ontaps3_svm_connections

    +

    Total number of object store server connections. svm_ontaps3_svm_connections is ontaps3_svm_connections aggregated by svm.

    @@ -56639,56 +62641,72 @@

svm_nfs_null_total
Unit: none
Type: rate
Base:

    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverconnections
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4null.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yamlZAPIperf-object-get-instances object_store_serverconnections
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_create_bucket_failed

    +

    Number of failed Create Bucket operations. svm_ontaps3_svm_create_bucket_failed is ontaps3_svm_create_bucket_failed aggregated by svm.

    RESTapi/cluster/counter/tables/svm_nfs_v41null.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v42null.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_servercreate_bucket_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3null_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_servercreate_bucket_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_create_bucket_failed_client_close

    +

    Number of times Create Bucket operation failed because client terminated connection while the operation was still pending on server. svm_ontaps3_svm_create_bucket_failed_client_close is ontaps3_svm_create_bucket_failed_client_close aggregated by svm.

    ZAPIperf-object-get-instances nfsv4null_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1null_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_servercreate_bucket_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2null_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_servercreate_bucket_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_nverify_avg_latency

    -

    Average latency of NVERIFY procedures

    +

    svm_ontaps3_svm_create_bucket_latency

    +

    Average latency for Create Bucket operations. svm_ontaps3_svm_create_bucket_latency is ontaps3_svm_create_bucket_latency aggregated by svm.

    @@ -56701,44 +62719,46 @@

svm_nfs_nverify_avg_latency
Unit: microsec
Type: average
Base: nverify.total

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41nverify.average_latency
    Unit: microsec
    Type: average
    Base: nverify.total
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_servercreate_bucket_latency
    Unit: microsec
    Type: average
    Base: create_bucket_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42nverify.average_latency
    Unit: microsec
    Type: average
    Base: nverify.total
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_servercreate_bucket_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: create_bucket_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_create_bucket_rate

    +

    Number of Create Bucket operations per second. svm_ontaps3_svm_create_bucket_rate is ontaps3_svm_create_bucket_rate aggregated by svm.

    ZAPIperf-object-get-instances nfsv4nverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: nverify_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1nverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: nverify_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_servercreate_bucket_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2nverify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: nverify_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_servercreate_bucket_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_nverify_total

    -

    Total number of NVERIFY procedures

    +

    svm_ontaps3_svm_create_bucket_total

    +

    Number of Create Bucket operations. svm_ontaps3_svm_create_bucket_total is ontaps3_svm_create_bucket_total aggregated by svm.

    @@ -56751,44 +62771,46 @@

svm_nfs_nverify_total
Unit: none
Type: rate
Base:

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41nverify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_servercreate_bucket_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42nverify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_servercreate_bucket_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_default_deny_access

    +

    Number of times access was denied by default and not through any policy statement. svm_ontaps3_svm_default_deny_access is ontaps3_svm_default_deny_access aggregated by svm.

    ZAPIperf-object-get-instances nfsv4nverify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1nverify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverdefault_deny_access
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2nverify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverdefault_deny_access
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_open_avg_latency

    -

    Average latency of OPEN procedures

    +

    svm_ontaps3_svm_delete_bucket_failed

    +

    Number of failed Delete Bucket operations. svm_ontaps3_svm_delete_bucket_failed is ontaps3_svm_delete_bucket_failed aggregated by svm.

    @@ -56801,44 +62823,46 @@

    svm_nfs_open_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v4open.average_latency
    Unit: microsec
    Type: average
    Base: open.total
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41open.average_latency
    Unit: microsec
    Type: average
    Base: open.total
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverdelete_bucket_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42open.average_latency
    Unit: microsec
    Type: average
    Base: open.total
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_serverdelete_bucket_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_delete_bucket_failed_client_close

    +

    Number of times Delete Bucket operation failed because client terminated connection while the operation was still pending on server. svm_ontaps3_svm_delete_bucket_failed_client_close is ontaps3_svm_delete_bucket_failed_client_close aggregated by svm.

    ZAPIperf-object-get-instances nfsv4open_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1open_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverdelete_bucket_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2open_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverdelete_bucket_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_open_confirm_avg_latency

    -

    Average latency of OPEN_CONFIRM procedures

    +

    svm_ontaps3_svm_delete_bucket_latency

    +

    Average latency for Delete Bucket operations. svm_ontaps3_svm_delete_bucket_latency is ontaps3_svm_delete_bucket_latency aggregated by svm.

    @@ -56851,20 +62875,20 @@

    svm_nfs_open_confirm_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v4open_confirm.average_latency
    Unit: microsec
    Type: average
    Base: open_confirm.total
    conf/restperf/9.12.0/nfsv4.yamlapi/cluster/counter/tables/object_store_serverdelete_bucket_latency
    Unit: microsec
    Type: average
    Base: delete_bucket_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4open_confirm_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_confirm_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlperf-object-get-instances object_store_serverdelete_bucket_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delete_bucket_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_open_confirm_total

    -

    Total number of OPEN_CONFIRM procedures

    +

    svm_ontaps3_svm_delete_bucket_rate

    +

    Number of Delete Bucket operations per second. svm_ontaps3_svm_delete_bucket_rate is ontaps3_svm_delete_bucket_rate aggregated by svm.

    @@ -56877,20 +62901,20 @@

svm_nfs_open_confirm_total
Unit: none
Type: rate
Base:

    conf/restperf/9.12.0/nfsv4.yamlapi/cluster/counter/tables/object_store_serverdelete_bucket_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4open_confirm_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlperf-object-get-instances object_store_serverdelete_bucket_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_open_downgrade_avg_latency

    -

    Average latency of OPEN_DOWNGRADE procedures

    +

    svm_ontaps3_svm_delete_bucket_total

    +

    Number of Delete Bucket operations. svm_ontaps3_svm_delete_bucket_total is ontaps3_svm_delete_bucket_total aggregated by svm.

    @@ -56903,44 +62927,20 @@

    svm_nfs_open_downgrade_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v4open_downgrade.average_latency
    Unit: microsec
    Type: average
    Base: open_downgrade.total
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41open_downgrade.average_latency
    Unit: microsec
    Type: average
    Base: open_downgrade.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42open_downgrade.average_latency
    Unit: microsec
    Type: average
    Base: open_downgrade.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4open_downgrade_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_downgrade_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1open_downgrade_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_downgrade_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverdelete_bucket_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2open_downgrade_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: open_downgrade_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverdelete_bucket_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_open_downgrade_total

    -

    Total number of OPEN_DOWNGRADE procedures

    +

    svm_ontaps3_svm_delete_object_failed

    +

    Number of failed DELETE object operations. svm_ontaps3_svm_delete_object_failed is ontaps3_svm_delete_object_failed aggregated by svm.

    @@ -56953,44 +62953,46 @@

svm_nfs_open_downgrade_total
Unit: none
Type: rate
Base:

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41open_downgrade.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverdelete_object_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42open_downgrade.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_serverdelete_object_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_delete_object_failed_client_close

    +

Number of times a DELETE object operation failed because the client closed the connection while the operation was still pending on the server. svm_ontaps3_svm_delete_object_failed_client_close is ontaps3_svm_delete_object_failed_client_close aggregated by svm.

    ZAPIperf-object-get-instances nfsv4open_downgrade_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1open_downgrade_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverdelete_object_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2open_downgrade_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverdelete_object_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_open_total

    -

    Total number of OPEN procedures

    +

    svm_ontaps3_svm_delete_object_latency

    +

    Average latency for DELETE object operations. svm_ontaps3_svm_delete_object_latency is ontaps3_svm_delete_object_latency aggregated by svm.

    @@ -57003,44 +63005,46 @@

svm_nfs_open_total
Unit: none
Type: rate
Base:

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41open.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverdelete_object_latency
    Unit: microsec
    Type: average
    Base: delete_object_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42open.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_serverdelete_object_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delete_object_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_delete_object_rate

    +

    Number of DELETE object operations per second. svm_ontaps3_svm_delete_object_rate is ontaps3_svm_delete_object_rate aggregated by svm.

    ZAPIperf-object-get-instances nfsv4open_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1open_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverdelete_object_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2open_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverdelete_object_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_openattr_avg_latency

    -

    Average latency of OPENATTR procedures

    +

    svm_ontaps3_svm_delete_object_tagging_failed

    +

    Number of failed DELETE object tagging operations. svm_ontaps3_svm_delete_object_tagging_failed is ontaps3_svm_delete_object_tagging_failed aggregated by svm.

    @@ -57053,44 +63057,46 @@

svm_nfs_openattr_avg_latency
Unit: microsec
Type: average
Base: openattr.total

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41openattr.average_latency
    Unit: microsec
    Type: average
    Base: openattr.total
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverdelete_object_tagging_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42openattr.average_latency
    Unit: microsec
    Type: average
    Base: openattr.total
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_serverdelete_object_tagging_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_delete_object_tagging_failed_client_close

    +

    Number of times DELETE object tagging operation failed because client terminated connection while the operation was still pending on server. svm_ontaps3_svm_delete_object_tagging_failed_client_close is ontaps3_svm_delete_object_tagging_failed_client_close aggregated by svm.

    ZAPIperf-object-get-instances nfsv4openattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: openattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1openattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: openattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverdelete_object_tagging_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2openattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: openattr_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverdelete_object_tagging_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_openattr_total

    -

    Total number of OPENATTR procedures

    +

    svm_ontaps3_svm_delete_object_tagging_latency

    +

    Average latency for DELETE object tagging operations. svm_ontaps3_svm_delete_object_tagging_latency is ontaps3_svm_delete_object_tagging_latency aggregated by svm.

    @@ -57103,44 +63109,46 @@

    svm_nfs_openattr_total

    RESTapi/cluster/counter/tables/svm_nfs_v4openattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41openattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverdelete_object_tagging_latency
    Unit: microsec
    Type: average
    Base: delete_object_tagging_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42openattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_serverdelete_object_tagging_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: delete_object_tagging_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_delete_object_tagging_rate

    +

    Number of DELETE object tagging operations per second. svm_ontaps3_svm_delete_object_tagging_rate is ontaps3_svm_delete_object_tagging_rate aggregated by svm.

    ZAPIperf-object-get-instances nfsv4openattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1openattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverdelete_object_tagging_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2openattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverdelete_object_tagging_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_ops

    -

    Total number of NFSv3 procedure requests per second.

    +

    svm_ontaps3_svm_delete_object_tagging_total

    +

    Number of DELETE object tagging operations. svm_ontaps3_svm_delete_object_tagging_total is ontaps3_svm_delete_object_tagging_total aggregated by svm.

    @@ -57153,56 +63161,72 @@

    svm_nfs_ops

    RESTapi/cluster/counter/tables/svm_nfs_v3ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverdelete_object_tagging_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yamlZAPIperf-object-get-instances object_store_serverdelete_object_tagging_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_delete_object_total

    +

    Number of DELETE object operations. svm_ontaps3_svm_delete_object_total is ontaps3_svm_delete_object_total aggregated by svm.

    RESTapi/cluster/counter/tables/svm_nfs_v41total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v42total_ops
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_serverdelete_object_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3nfsv3_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverdelete_object_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_explicit_deny_access

    +

    Number of times access was denied explicitly by a policy statement. svm_ontaps3_svm_explicit_deny_access is ontaps3_svm_explicit_deny_access aggregated by svm.

    ZAPIperf-object-get-instances nfsv4total_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1total_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverexplicit_deny_access
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2total_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverexplicit_deny_access
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
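
The allow_access and *_deny_access counters documented above can be combined into a rough denial ratio for an interval. A hedged sketch with invented numbers; the ratio itself is not a metric defined in this document:

```python
# Rough sketch: an S3 access-denial ratio for one SVM, combining the allow/deny
# counters documented above over the same interval. The numbers are invented.

def deny_ratio(allow, explicit_deny, default_deny, anonymous_deny):
    """Fraction of access decisions that were denials in the sampled interval."""
    denied = explicit_deny + default_deny + anonymous_deny
    total = allow + denied
    return denied / total if total else 0.0

print(deny_ratio(allow=970, explicit_deny=20, default_deny=8, anonymous_deny=2))  # 0.03
```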
    -

    svm_nfs_pathconf_avg_latency

    -

    Average latency of PathConf procedure requests. The counter keeps track of the average response time of PathConf requests.

    +

    svm_ontaps3_svm_get_bucket_acl_failed

    +

    Number of failed GET Bucket ACL operations. svm_ontaps3_svm_get_bucket_acl_failed is ontaps3_svm_get_bucket_acl_failed aggregated by svm.

    @@ -57215,20 +63239,20 @@

svm_nfs_pathconf_avg_latency
Unit: microsec
Type: average
Base: pathconf.total

    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverget_bucket_acl_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3pathconf_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: pathconf_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverget_bucket_acl_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_pathconf_total

    -

Total number of PathConf procedure requests. It is the total number of PathConf success and PathConf error requests.

    +

    svm_ontaps3_svm_get_bucket_acl_total

    +

    Number of GET Bucket ACL operations. svm_ontaps3_svm_get_bucket_acl_total is ontaps3_svm_get_bucket_acl_total aggregated by svm.

    @@ -57241,20 +63265,20 @@

    svm_nfs_pathconf_total

    RESTapi/cluster/counter/tables/svm_nfs_v3pathconf.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverget_bucket_acl_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3pathconf_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverget_bucket_acl_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_putfh_avg_latency

    -

    Average latency of PUTFH procedures

    +

    svm_ontaps3_svm_get_bucket_versioning_failed

    +

    Number of failed Get Bucket Versioning operations. svm_ontaps3_svm_get_bucket_versioning_failed is ontaps3_svm_get_bucket_versioning_failed aggregated by svm.

    @@ -57267,44 +63291,46 @@

svm_nfs_putfh_avg_latency
Unit: microsec
Type: average
Base: putfh.total

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41putfh.average_latency
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverget_bucket_versioning_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42putfh.average_latency
    Unit: microsec
    Type: average
    Base: putfh.total
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_serverget_bucket_versioning_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_get_bucket_versioning_total

    +

    Number of Get Bucket Versioning operations. svm_ontaps3_svm_get_bucket_versioning_total is ontaps3_svm_get_bucket_versioning_total aggregated by svm.

    ZAPIperf-object-get-instances nfsv4putfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1putfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverget_bucket_versioning_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2putfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putfh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverget_bucket_versioning_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_putfh_total

    -

    Total number of PUTFH procedures

    +

    svm_ontaps3_svm_get_data

    +

    Rate of GET object data transfers per second. svm_ontaps3_svm_get_data is ontaps3_svm_get_data aggregated by svm.

    @@ -57317,44 +63343,46 @@

svm_nfs_putfh_total
Unit: none
Type: rate
Base:

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41putfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverget_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42putfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_serverget_data
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_get_object_acl_failed

    +

    Number of failed GET Object ACL operations. svm_ontaps3_svm_get_object_acl_failed is ontaps3_svm_get_object_acl_failed aggregated by svm.

    ZAPIperf-object-get-instances nfsv4putfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1putfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverget_object_acl_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2putfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverget_object_acl_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_putpubfh_avg_latency

    -

    Average latency of PUTPUBFH procedures

    +

    svm_ontaps3_svm_get_object_acl_total

    +

    Number of GET Object ACL operations. svm_ontaps3_svm_get_object_acl_total is ontaps3_svm_get_object_acl_total aggregated by svm.

    @@ -57367,44 +63395,46 @@

    svm_nfs_putpubfh_avg_latencyUnit: microsec
    Type: average
    Base: putpubfh.total -

    - - - - - - + + + - - - - + + + + + +
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41putpubfh.average_latency
    Unit: microsec
    Type: average
    Base: putpubfh.total
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverget_object_acl_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42putpubfh.average_latency
    Unit: microsec
    Type: average
    Base: putpubfh.total
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_serverget_object_acl_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_get_object_failed

    +

    Number of failed GET object operations. svm_ontaps3_svm_get_object_failed is ontaps3_svm_get_object_failed aggregated by svm.

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4putpubfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putpubfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1putpubfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putpubfh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverget_object_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2putpubfh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: putpubfh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverget_object_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_putpubfh_total

    -

    Total number of PUTPUBFH procedures

    +

    svm_ontaps3_svm_get_object_failed_client_close

    +

    Number of times GET object operation failed due to the case where client closed the connection while the operation was still pending on server. svm_ontaps3_svm_get_object_failed_client_close is ontaps3_svm_get_object_failed_client_close aggregated by svm.

    @@ -57417,44 +63447,20 @@

    svm_nfs_putpubfh_total

    - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v4putpubfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41putpubfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42putpubfh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4putpubfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1putpubfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverget_object_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2putpubfh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverget_object_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_putrootfh_avg_latency

    -

    Average latency of PUTROOTFH procedures

    +

    svm_ontaps3_svm_get_object_lastbyte_latency

    +

    Average last-byte latency for GET object operations. svm_ontaps3_svm_get_object_lastbyte_latency is ontaps3_svm_get_object_lastbyte_latency aggregated by svm.

    @@ -57467,44 +63473,20 @@

- svm_nfs_putrootfh_avg_latency (Unit: microsec, Type: average, Base: putrootfh.total)
- REST | api/cluster/counter/tables/svm_nfs_v4 | putrootfh.average_latency | conf/restperf/9.12.0/nfsv4.yaml
- REST | api/cluster/counter/tables/svm_nfs_v41 | putrootfh.average_latency (Unit: microsec, Type: average, Base: putrootfh.total) | conf/restperf/9.12.0/nfsv4_1.yaml
- REST | api/cluster/counter/tables/svm_nfs_v42 | putrootfh.average_latency (Unit: microsec, Type: average, Base: putrootfh.total) | conf/restperf/9.12.0/nfsv4_2.yaml
- ZAPI | perf-object-get-instances nfsv4 | putrootfh_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: putrootfh_total) | conf/zapiperf/cdot/9.8.0/nfsv4.yaml
- ZAPI | perf-object-get-instances nfsv4_1 | putrootfh_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: putrootfh_total) | conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
- ZAPI | perf-object-get-instances nfsv4_2 | putrootfh_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: putrootfh_total) | conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
+ REST | api/cluster/counter/tables/object_store_server | get_object_lastbyte_latency (Unit: microsec, Type: average, Base: get_object_total) | conf/restperf/9.14.1/ontap_s3_svm.yaml
+ ZAPI | perf-object-get-instances object_store_server | get_object_lastbyte_latency (Unit: microsec, Type: average,no-zero-values, Base: get_object_lastbyte_latency_base) | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_putrootfh_total

    -

    Total number of PUTROOTFH procedures

    +

    svm_ontaps3_svm_get_object_latency

    +

    Average first-byte latency for GET object operations. svm_ontaps3_svm_get_object_latency is ontaps3_svm_get_object_latency aggregated by svm.

    @@ -57517,44 +63499,46 @@

- svm_nfs_putrootfh_total
- REST | api/cluster/counter/tables/svm_nfs_v4 | putrootfh.total (Unit: none, Type: rate) | conf/restperf/9.12.0/nfsv4.yaml
- REST | api/cluster/counter/tables/svm_nfs_v41 | putrootfh.total (Unit: none, Type: rate) | conf/restperf/9.12.0/nfsv4_1.yaml
- REST | api/cluster/counter/tables/svm_nfs_v42 | putrootfh.total (Unit: none, Type: rate) | conf/restperf/9.12.0/nfsv4_2.yaml
+ REST | api/cluster/counter/tables/object_store_server | get_object_latency (Unit: microsec, Type: average, Base: get_object_total) | conf/restperf/9.14.1/ontap_s3_svm.yaml
+ ZAPI | perf-object-get-instances object_store_server | get_object_latency (Unit: microsec, Type: average,no-zero-values, Base: get_object_latency_base) | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
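Counters listed with Type: average are not published raw: the exposed value is the change in the raw microsecond counter divided by the change in the listed Base counter between two polls. A minimal sketch of that calculation follows, using hypothetical raw readings.

```python
# Minimal sketch of an average-type counter: published latency is the delta of
# the raw microsecond counter divided by the delta of its Base counter
# (get_object_total here). The raw sample values are hypothetical.

def avg_latency_us(lat_prev: int, lat_curr: int, base_prev: int, base_curr: int) -> float:
    ops = base_curr - base_prev
    if ops <= 0:
        return 0.0  # no GET object operations completed between the two polls
    return (lat_curr - lat_prev) / ops

# 50 ms of accumulated latency spread over 25 operations -> 2000 us per op
print(avg_latency_us(1_000_000, 1_050_000, 100, 125))
```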
    +

    svm_ontaps3_svm_get_object_rate

    +

    Number of GET object operations per second. svm_ontaps3_svm_get_object_rate is ontaps3_svm_get_object_rate aggregated by svm.

- ZAPI | perf-object-get-instances nfsv4 | putrootfh_total (Unit: none, Type: rate) | conf/zapiperf/cdot/9.8.0/nfsv4.yaml
- ZAPI | perf-object-get-instances nfsv4_1 | putrootfh_total (Unit: none, Type: rate) | conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
- ZAPI | perf-object-get-instances nfsv4_2 | putrootfh_total (Unit: none, Type: rate) | conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
+ API | Endpoint | Metric | Template
+ REST | api/cluster/counter/tables/object_store_server | get_object_rate (Unit: per_sec, Type: rate) | conf/restperf/9.14.1/ontap_s3_svm.yaml
+ ZAPI | perf-object-get-instances object_store_server | get_object_rate (Unit: per_sec, Type: rate,no-zero-values) | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_read_avg_latency

    -

    Average latency of Read procedure requests. The counter keeps track of the average response time of Read requests.

    +

    svm_ontaps3_svm_get_object_tagging_failed

    +

    Number of failed GET object tagging operations. svm_ontaps3_svm_get_object_tagging_failed is ontaps3_svm_get_object_tagging_failed aggregated by svm.

    @@ -57567,56 +63551,72 @@

    svm_nfs_read_avg_latency

    - - - + + + - - - - + + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v3read.average_latency
    Unit: microsec
    Type: average
    Base: read.total
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverget_object_tagging_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4read.average_latency
    Unit: microsec
    Type: average
    Base: read.total
    conf/restperf/9.12.0/nfsv4.yamlZAPIperf-object-get-instances object_store_serverget_object_tagging_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_get_object_tagging_failed_client_close

    +

    Number of times GET object tagging operation failed due to the case where client closed the connection while the operation was still pending on server. svm_ontaps3_svm_get_object_tagging_failed_client_close is ontaps3_svm_get_object_tagging_failed_client_close aggregated by svm.

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41read.average_latency
    Unit: microsec
    Type: average
    Base: read.total
    conf/restperf/9.12.0/nfsv4_1.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v42read.average_latency
    Unit: microsec
    Type: average
    Base: read.total
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_serverget_object_tagging_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3read_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverget_object_tagging_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_get_object_tagging_latency

    +

    Average latency for GET object tagging operations. svm_ontaps3_svm_get_object_tagging_latency is ontaps3_svm_get_object_tagging_latency aggregated by svm.

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4read_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1read_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverget_object_tagging_latency
    Unit: microsec
    Type: average
    Base: get_object_tagging_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2read_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverget_object_tagging_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: get_object_tagging_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_read_ops

    -

    Total observed NFSv3 read operations per second.

    +

    svm_ontaps3_svm_get_object_tagging_rate

    +

    Number of GET object tagging operations per second. svm_ontaps3_svm_get_object_tagging_rate is ontaps3_svm_get_object_tagging_rate aggregated by svm.

    @@ -57629,20 +63629,20 @@

    svm_nfs_read_opsUnit: per_sec
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverget_object_tagging_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3nfsv3_read_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverget_object_tagging_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
svm_nfs_read_symlink_avg_latency

Average latency of ReadSymLink procedure requests. The counter keeps track of the average response time of ReadSymLink requests.

    +

    svm_ontaps3_svm_get_object_tagging_total

    +

    Number of GET object tagging operations. svm_ontaps3_svm_get_object_tagging_total is ontaps3_svm_get_object_tagging_total aggregated by svm.

    @@ -57655,20 +63655,20 @@ - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v3read_symlink.average_latency
    Unit: microsec
    Type: average
    Base: read_symlink.total
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverget_object_tagging_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3read_symlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: read_symlink_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverget_object_tagging_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
svm_nfs_read_symlink_total

Total number of ReadSymLink procedure requests. It is the total number of read symlink success and read symlink error requests.

    +

    svm_ontaps3_svm_get_object_total

    +

    Number of GET object operations. svm_ontaps3_svm_get_object_total is ontaps3_svm_get_object_total aggregated by svm.

    @@ -57681,20 +63681,20 @@ + + + - - - + + +
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverget_object_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3read_symlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverget_object_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_read_throughput

    -

    Rate of NFSv3 read data transfers per second.

    +

    svm_ontaps3_svm_group_policy_evaluated

    +

    Number of times group policies were evaluated. svm_ontaps3_svm_group_policy_evaluated is ontaps3_svm_group_policy_evaluated aggregated by svm.

    @@ -57707,56 +63707,72 @@

    svm_nfs_read_throughput

    - - - + + + - - - - + + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v3read_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_servergroup_policy_evaluated
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4total.read_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yamlZAPIperf-object-get-instances object_store_servergroup_policy_evaluated
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_head_bucket_failed

    +

    Number of failed HEAD bucket operations. svm_ontaps3_svm_head_bucket_failed is ontaps3_svm_head_bucket_failed aggregated by svm.

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41total.read_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v42total.read_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_serverhead_bucket_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3nfsv3_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverhead_bucket_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_head_bucket_failed_client_close

    +

    Number of times HEAD bucket operation failed due to the case where client closed the connection while the operation was still pending on server. svm_ontaps3_svm_head_bucket_failed_client_close is ontaps3_svm_head_bucket_failed_client_close aggregated by svm.

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4nfs4_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1nfs41_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverhead_bucket_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2nfs42_read_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverhead_bucket_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_read_total

    -

Total number of Read procedure requests. It is the total number of read success and read error requests.

    +

    svm_ontaps3_svm_head_bucket_latency

    +

    Average latency for HEAD bucket operations. svm_ontaps3_svm_head_bucket_latency is ontaps3_svm_head_bucket_latency aggregated by svm.

    @@ -57769,56 +63785,72 @@

    svm_nfs_read_totalUnit: none
    Type: rate
    Base: -

    + + + - - - - + + + + + +
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverhead_bucket_latency
    Unit: microsec
    Type: average
    Base: head_bucket_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4read.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yamlZAPIperf-object-get-instances object_store_serverhead_bucket_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: head_bucket_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_head_bucket_rate

    +

    Number of HEAD bucket operations per second. svm_ontaps3_svm_head_bucket_rate is ontaps3_svm_head_bucket_rate aggregated by svm.

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41read.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v42read.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_serverhead_bucket_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3read_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverhead_bucket_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_head_bucket_total

    +

    Number of HEAD bucket operations. svm_ontaps3_svm_head_bucket_total is ontaps3_svm_head_bucket_total aggregated by svm.

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4read_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1read_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverhead_bucket_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2read_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverhead_bucket_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_readdir_avg_latency

    -

    Average latency of ReadDir procedure requests. The counter keeps track of the average response time of ReadDir requests.

    +

    svm_ontaps3_svm_head_object_failed

    +

    Number of failed HEAD Object operations. svm_ontaps3_svm_head_object_failed is ontaps3_svm_head_object_failed aggregated by svm.

    @@ -57831,56 +63863,72 @@

    svm_nfs_readdir_avg_latencyUnit: microsec
    Type: average
    Base: readdir.total -

    + + + - - - - + + + + + +
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverhead_object_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4readdir.average_latency
    Unit: microsec
    Type: average
    Base: readdir.total
    conf/restperf/9.12.0/nfsv4.yamlZAPIperf-object-get-instances object_store_serverhead_object_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_head_object_failed_client_close

    +

    Number of times HEAD object operation failed due to the case where client closed the connection while the operation was still pending on server. svm_ontaps3_svm_head_object_failed_client_close is ontaps3_svm_head_object_failed_client_close aggregated by svm.

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41readdir.average_latency
    Unit: microsec
    Type: average
    Base: readdir.total
    conf/restperf/9.12.0/nfsv4_1.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v42readdir.average_latency
    Unit: microsec
    Type: average
    Base: readdir.total
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_serverhead_object_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3readdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverhead_object_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_head_object_latency

    +

    Average latency for HEAD object operations. svm_ontaps3_svm_head_object_latency is ontaps3_svm_head_object_latency aggregated by svm.

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4readdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1readdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverhead_object_latency
    Unit: microsec
    Type: average
    Base: head_object_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2readdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdir_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverhead_object_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: head_object_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_readdir_total

    -

Total number of ReadDir procedure requests. It is the total number of ReadDir success and ReadDir error requests.

    +

    svm_ontaps3_svm_head_object_rate

    +

    Number of HEAD Object operations per second. svm_ontaps3_svm_head_object_rate is ontaps3_svm_head_object_rate aggregated by svm.

    @@ -57893,56 +63941,72 @@

    svm_nfs_readdir_totalUnit: none
    Type: rate
    Base: -

    + + + - - - - + + + + + +
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverhead_object_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4readdir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yamlZAPIperf-object-get-instances object_store_serverhead_object_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_head_object_total

    +

    Number of HEAD Object operations. svm_ontaps3_svm_head_object_total is ontaps3_svm_head_object_total aggregated by svm.

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41readdir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v42readdir.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_serverhead_object_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3readdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverhead_object_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_initiate_multipart_upload_failed

    +

    Number of failed Initiate Multipart Upload operations. svm_ontaps3_svm_initiate_multipart_upload_failed is ontaps3_svm_initiate_multipart_upload_failed aggregated by svm.

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4readdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1readdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2readdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverinitiate_multipart_upload_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_readdirplus_avg_latency

    -

    Average latency of ReadDirPlus procedure requests. The counter keeps track of the average response time of ReadDirPlus requests.

    +

    svm_ontaps3_svm_initiate_multipart_upload_failed_client_close

    +

    Number of times Initiate Multipart Upload operation failed because client terminated connection while the operation was still pending on server. svm_ontaps3_svm_initiate_multipart_upload_failed_client_close is ontaps3_svm_initiate_multipart_upload_failed_client_close aggregated by svm.

    @@ -57955,20 +64019,20 @@

    svm_nfs_readdirplus_avg_latency

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v3readdirplus.average_latency
    Unit: microsec
    Type: average
    Base: readdirplus.total
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3readdirplus_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readdirplus_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverinitiate_multipart_upload_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_readdirplus_total

    -

Total number of ReadDirPlus procedure requests. It is the total number of ReadDirPlus success and ReadDirPlus error requests.

    +

    svm_ontaps3_svm_initiate_multipart_upload_latency

    +

    Average latency for Initiate Multipart Upload operations. svm_ontaps3_svm_initiate_multipart_upload_latency is ontaps3_svm_initiate_multipart_upload_latency aggregated by svm.

    @@ -57981,20 +64045,20 @@

    svm_nfs_readdirplus_totalUnit: none
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_latency
    Unit: microsec
    Type: average
    Base: initiate_multipart_upload_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3readdirplus_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverinitiate_multipart_upload_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: initiate_multipart_upload_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
svm_nfs_readlink_avg_latency

Average latency of READLINK procedures

    +

    svm_ontaps3_svm_initiate_multipart_upload_rate

    +

    Number of Initiate Multipart Upload operations per second. svm_ontaps3_svm_initiate_multipart_upload_rate is ontaps3_svm_initiate_multipart_upload_rate aggregated by svm.

    @@ -58007,44 +64071,20 @@ - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41readlink.average_latency
    Unit: microsec
    Type: average
    Base: readlink.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42readlink.average_latency
    Unit: microsec
    Type: average
    Base: readlink.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4readlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readlink_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1readlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readlink_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2readlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: readlink_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverinitiate_multipart_upload_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
svm_nfs_readlink_total

Total number of READLINK procedures

    +

    svm_ontaps3_svm_initiate_multipart_upload_total

    +

    Number of Initiate Multipart Upload operations. svm_ontaps3_svm_initiate_multipart_upload_total is ontaps3_svm_initiate_multipart_upload_total aggregated by svm.

    @@ -58057,44 +64097,20 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v4readlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41readlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42readlink.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4readlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1readlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverinitiate_multipart_upload_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2readlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverinitiate_multipart_upload_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_reclaim_complete_avg_latency

    -

    Average latency of RECLAIM_COMPLETE operations.

    +

    svm_ontaps3_svm_input_flow_control_entry

    +

    Number of times input flow control was entered. svm_ontaps3_svm_input_flow_control_entry is ontaps3_svm_input_flow_control_entry aggregated by svm.

    @@ -58107,32 +64123,20 @@

svm_nfs_reclaim_complete_avg_latency

- REST | api/cluster/counter/tables/svm_nfs_v41 | reclaim_complete.average_latency (Unit: microsec, Type: average, Base: reclaim_complete.total) | conf/restperf/9.12.0/nfsv4_1.yaml
- REST | api/cluster/counter/tables/svm_nfs_v42 | reclaim_complete.average_latency (Unit: microsec, Type: average, Base: reclaim_complete.total) | conf/restperf/9.12.0/nfsv4_2.yaml
- ZAPI | perf-object-get-instances nfsv4_1 | reclaim_complete_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: reclaim_complete_total) | conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml
- ZAPI | perf-object-get-instances nfsv4_2 | reclaim_complete_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: reclaim_complete_total) | conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml
+ REST | api/cluster/counter/tables/object_store_server | input_flow_control_entry (Unit: none, Type: delta) | conf/restperf/9.14.1/ontap_s3_svm.yaml
+ ZAPI | perf-object-get-instances object_store_server | input_flow_control_entry (Unit: none, Type: delta,no-zero-values) | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
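A sketch of how this counter is commonly read alongside svm_ontaps3_svm_input_flow_control_exit (the next entry): comparing the two deltas over the same window hints at whether connections are still being throttled at the end of the window. This is an illustrative assumption, not part of the generated reference, and the delta values are hypothetical.

```python
# Minimal sketch: pair input_flow_control_entry with input_flow_control_exit
# over the same poll window. A persistent positive gap suggests connections
# are still held in input flow control. Values below are hypothetical.

def still_in_flow_control(entries_delta: int, exits_delta: int) -> int:
    """Entries this window that have not been matched by an exit yet."""
    return max(entries_delta - exits_delta, 0)

print(still_in_flow_control(entries_delta=40, exits_delta=37))  # -> 3
```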
    -

    svm_nfs_reclaim_complete_total

    -

    Total number of RECLAIM_COMPLETE operations.

    +

    svm_ontaps3_svm_input_flow_control_exit

    +

    Number of times input flow control was exited. svm_ontaps3_svm_input_flow_control_exit is ontaps3_svm_input_flow_control_exit aggregated by svm.

    @@ -58145,32 +64149,20 @@

    svm_nfs_reclaim_complete_totalUnit: none
    Type: rate
    Base: -

    - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42reclaim_complete.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1reclaim_complete_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverinput_flow_control_exit
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2reclaim_complete_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverinput_flow_control_exit
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_release_lock_owner_avg_latency

    -

    Average Latency of RELEASE_LOCKOWNER procedures

    +

    svm_ontaps3_svm_list_buckets_failed

    +

    Number of failed LIST Buckets operations. svm_ontaps3_svm_list_buckets_failed is ontaps3_svm_list_buckets_failed aggregated by svm.

    @@ -58183,20 +64175,20 @@

svm_nfs_release_lock_owner_avg_latency

- REST | api/cluster/counter/tables/svm_nfs_v4 | release_lock_owner.average_latency (Unit: microsec, Type: average, Base: release_lock_owner.total) | conf/restperf/9.12.0/nfsv4.yaml
- ZAPI | perf-object-get-instances nfsv4 | release_lock_owner_avg_latency (Unit: microsec, Type: average,no-zero-values, Base: release_lock_owner_total) | conf/zapiperf/cdot/9.8.0/nfsv4.yaml
+ REST | api/cluster/counter/tables/object_store_server | list_buckets_failed (Unit: none, Type: delta) | conf/restperf/9.14.1/ontap_s3_svm.yaml
+ ZAPI | perf-object-get-instances object_store_server | list_buckets_failed (Unit: none, Type: delta,no-zero-values) | conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_release_lock_owner_total

    -

    Total number of RELEASE_LOCKOWNER procedures

    +

    svm_ontaps3_svm_list_buckets_failed_client_close

    +

    Number of times LIST Bucket operation failed due to the case where client closed the connection while the operation was still pending on server. svm_ontaps3_svm_list_buckets_failed_client_close is ontaps3_svm_list_buckets_failed_client_close aggregated by svm.

    @@ -58209,20 +64201,20 @@

    svm_nfs_release_lock_owner_total

    - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v4release_lock_owner.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yamlapi/cluster/counter/tables/object_store_serverlist_buckets_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4release_lock_owner_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlperf-object-get-instances object_store_serverlist_buckets_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_remove_avg_latency

    -

    Average latency of Remove procedure requests. The counter keeps track of the average response time of Remove requests.

    +

    svm_ontaps3_svm_list_buckets_latency

    +

    Average latency for LIST Buckets operations. svm_ontaps3_svm_list_buckets_latency is ontaps3_svm_list_buckets_latency aggregated by svm.

    @@ -58235,56 +64227,20 @@

    svm_nfs_remove_avg_latencyUnit: microsec
    Type: average
    Base: remove.total -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4remove.average_latency
    Unit: microsec
    Type: average
    Base: remove.total
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41remove.average_latency
    Unit: microsec
    Type: average
    Base: remove.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42remove.average_latency
    Unit: microsec
    Type: average
    Base: remove.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3remove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4remove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1remove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverlist_buckets_latency
    Unit: microsec
    Type: average
    Base: list_buckets_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2remove_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: remove_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverlist_buckets_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: head_object_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_remove_total

    -

Total number of Remove procedure requests. It is the total number of Remove success and Remove error requests.

    +

    svm_ontaps3_svm_list_buckets_rate

    +

    Number of LIST Buckets operations per second. svm_ontaps3_svm_list_buckets_rate is ontaps3_svm_list_buckets_rate aggregated by svm.

    @@ -58297,56 +64253,20 @@

    svm_nfs_remove_totalUnit: none
    Type: rate
    Base: -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4remove.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41remove.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42remove.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3remove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4remove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1remove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverlist_buckets_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2remove_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverlist_buckets_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_rename_avg_latency

    -

    Average latency of Rename procedure requests. The counter keeps track of the average response time of Rename requests.

    +

    svm_ontaps3_svm_list_buckets_total

    +

    Number of LIST Buckets operations. svm_ontaps3_svm_list_buckets_total is ontaps3_svm_list_buckets_total aggregated by svm.

    @@ -58359,56 +64279,20 @@

    svm_nfs_rename_avg_latencyUnit: microsec
    Type: average
    Base: rename.total -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4rename.average_latency
    Unit: microsec
    Type: average
    Base: rename.total
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41rename.average_latency
    Unit: microsec
    Type: average
    Base: rename.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42rename.average_latency
    Unit: microsec
    Type: average
    Base: rename.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3rename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4rename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1rename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverlist_buckets_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2rename_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rename_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverlist_buckets_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_rename_total

    -

Total number of Rename procedure requests. It is the total number of Rename success and Rename error requests.

    +

    svm_ontaps3_svm_list_object_versions_failed

    +

    Number of failed LIST object versions operations. svm_ontaps3_svm_list_object_versions_failed is ontaps3_svm_list_object_versions_failed aggregated by svm.

    @@ -58421,56 +64305,72 @@

    svm_nfs_rename_totalUnit: none
    Type: rate
    Base: -

    + + + - - - - + + + + + +
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverlist_object_versions_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4rename.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yamlZAPIperf-object-get-instances object_store_serverlist_object_versions_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_list_object_versions_failed_client_close

    +

    Number of times LIST object versions operation failed due to the case where client closed the connection while the operation was still pending on server. svm_ontaps3_svm_list_object_versions_failed_client_close is ontaps3_svm_list_object_versions_failed_client_close aggregated by svm.

    + + - - - - + + + + + + - - - + + + - - - + + + + +
    RESTapi/cluster/counter/tables/svm_nfs_v41rename.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v42rename.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_serverlist_object_versions_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3rename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverlist_object_versions_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_list_object_versions_latency

    +

    Average latency for LIST Object versions operations. svm_ontaps3_svm_list_object_versions_latency is ontaps3_svm_list_object_versions_latency aggregated by svm.

    + + - - - - + + + + + + - - - - + + + + - - - + + +
    ZAPIperf-object-get-instances nfsv4rename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1rename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverlist_object_versions_latency
    Unit: microsec
    Type: average
    Base: list_object_versions_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2rename_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverlist_object_versions_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: list_object_versions_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_renew_avg_latency

    -

    Average latency of RENEW procedures

    +

    svm_ontaps3_svm_list_object_versions_rate

    +

    Number of LIST Object Versions operations per second. svm_ontaps3_svm_list_object_versions_rate is ontaps3_svm_list_object_versions_rate aggregated by svm.

    @@ -58483,20 +64383,20 @@

    svm_nfs_renew_avg_latencyUnit: microsec
    Type: average
    Base: renew.total -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv4.yamlapi/cluster/counter/tables/object_store_serverlist_object_versions_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4renew_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: renew_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlperf-object-get-instances object_store_serverlist_object_versions_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_renew_total

    -

    Total number of RENEW procedures

    +

    svm_ontaps3_svm_list_object_versions_total

    +

    Number of LIST Object Versions operations. svm_ontaps3_svm_list_object_versions_total is ontaps3_svm_list_object_versions_total aggregated by svm.

    @@ -58509,20 +64409,20 @@

    svm_nfs_renew_totalUnit: none
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv4.yamlapi/cluster/counter/tables/object_store_serverlist_object_versions_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4renew_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlperf-object-get-instances object_store_serverlist_object_versions_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_restorefh_avg_latency

    -

    Average latency of RESTOREFH procedures

    +

    svm_ontaps3_svm_list_objects_failed

    +

    Number of failed LIST objects operations. svm_ontaps3_svm_list_objects_failed is ontaps3_svm_list_objects_failed aggregated by svm.

    @@ -58535,44 +64435,20 @@

    svm_nfs_restorefh_avg_latencyUnit: microsec
    Type: average
    Base: restorefh.total -

    - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41restorefh.average_latency
    Unit: microsec
    Type: average
    Base: restorefh.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42restorefh.average_latency
    Unit: microsec
    Type: average
    Base: restorefh.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4restorefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: restorefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1restorefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: restorefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverlist_objects_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2restorefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: restorefh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverlist_objects_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_restorefh_total

    -

    Total number of RESTOREFH procedures

    +

    svm_ontaps3_svm_list_objects_failed_client_close

    +

    Number of times LIST objects operation failed due to the case where client closed the connection while the operation was still pending on server. svm_ontaps3_svm_list_objects_failed_client_close is ontaps3_svm_list_objects_failed_client_close aggregated by svm.

    @@ -58585,44 +64461,20 @@

    svm_nfs_restorefh_total

    - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    RESTapi/cluster/counter/tables/svm_nfs_v4restorefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41restorefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42restorefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4restorefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1restorefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverlist_objects_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2restorefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverlist_objects_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_rmdir_avg_latency

    -

    Average latency of RmDir procedure requests. The counter keeps track of the average response time of RmDir requests.

    +

    svm_ontaps3_svm_list_objects_latency

    +

    Average latency for LIST Objects operations. svm_ontaps3_svm_list_objects_latency is ontaps3_svm_list_objects_latency aggregated by svm.

    @@ -58635,20 +64487,20 @@

    svm_nfs_rmdir_avg_latencyUnit: microsec
    Type: average
    Base: rmdir.total -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverlist_objects_latency
    Unit: microsec
    Type: average
    Base: list_objects_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3rmdir_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: rmdir_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverlist_objects_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: list_objects_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_rmdir_total

    -

Total number of RmDir procedure requests. It is the total number of RmDir success and RmDir error requests.

    +

    svm_ontaps3_svm_list_objects_rate

    +

    Number of LIST Objects operations per second. svm_ontaps3_svm_list_objects_rate is ontaps3_svm_list_objects_rate aggregated by svm.

    @@ -58661,20 +64513,20 @@

    svm_nfs_rmdir_totalUnit: none
    Type: rate
    Base: -

    + + + - - - + + +
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverlist_objects_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3rmdir_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverlist_objects_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_savefh_avg_latency

    -

    Average latency of SAVEFH procedures

    +

    svm_ontaps3_svm_list_objects_total

    +

    Number of LIST Objects operations. svm_ontaps3_svm_list_objects_total is ontaps3_svm_list_objects_total aggregated by svm.

    @@ -58687,44 +64539,20 @@

    svm_nfs_savefh_avg_latencyUnit: microsec
    Type: average
    Base: savefh.total -

    - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - + + +
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41savefh.average_latency
    Unit: microsec
    Type: average
    Base: savefh.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42savefh.average_latency
    Unit: microsec
    Type: average
    Base: savefh.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4savefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: savefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1savefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: savefh_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverlist_objects_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2savefh_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: savefh_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverlist_objects_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_savefh_total

    -

    Total number of SAVEFH procedures

    +

    svm_ontaps3_svm_list_uploads_failed

    +

    Number of failed LIST Upload operations. svm_ontaps3_svm_list_uploads_failed is ontaps3_svm_list_uploads_failed aggregated by svm.

    @@ -58737,44 +64565,46 @@

    svm_nfs_savefh_totalUnit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41savefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverlist_uploads_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42savefh.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_serverlist_uploads_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_list_uploads_failed_client_close

    +

Number of times a LIST Upload operation failed because the client closed the connection while the operation was still pending on the server. svm_ontaps3_svm_list_uploads_failed_client_close is ontaps3_svm_list_uploads_failed_client_close aggregated by svm.

    ZAPIperf-object-get-instances nfsv4savefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1savefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverlist_uploads_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2savefh_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverlist_uploads_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_secinfo_avg_latency

    -

    Average latency of SECINFO procedures

    +

    svm_ontaps3_svm_list_uploads_latency

    +

    Average latency for LIST Upload operations. svm_ontaps3_svm_list_uploads_latency is ontaps3_svm_list_uploads_latency aggregated by svm.

    @@ -58787,44 +64617,46 @@

    svm_nfs_secinfo_avg_latencyUnit: microsec
    Type: average
    Base: secinfo.total -

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41secinfo.average_latency
    Unit: microsec
    Type: average
    Base: secinfo.total
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverlist_uploads_latency
    Unit: microsec
    Type: average
    Base: list_uploads_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42secinfo.average_latency
    Unit: microsec
    Type: average
    Base: secinfo.total
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_serverlist_uploads_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: list_uploads_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_list_uploads_rate

    +

    Number of LIST Upload operations per second. svm_ontaps3_svm_list_uploads_rate is ontaps3_svm_list_uploads_rate aggregated by svm.

    ZAPIperf-object-get-instances nfsv4secinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1secinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverlist_uploads_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2secinfo_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverlist_uploads_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_secinfo_no_name_avg_latency

    -

    Average latency of SECINFO_NO_NAME operations.

    +

    svm_ontaps3_svm_list_uploads_total

    +

    Number of LIST Upload operations. svm_ontaps3_svm_list_uploads_total is ontaps3_svm_list_uploads_total aggregated by svm.

    @@ -58837,32 +64669,20 @@

    svm_nfs_secinfo_no_name_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v41secinfo_no_name.average_latency
    Unit: microsec
    Type: average
    Base: secinfo_no_name.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42secinfo_no_name.average_latency
    Unit: microsec
    Type: average
    Base: secinfo_no_name.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1secinfo_no_name_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_no_name_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverlist_uploads_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2secinfo_no_name_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: secinfo_no_name_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverlist_uploads_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_secinfo_no_name_total

    -

    Total number of SECINFO_NO_NAME operations.

    +

    svm_ontaps3_svm_max_cmds_per_connection

    +

    Maximum commands pipelined at any instance on a connection. svm_ontaps3_svm_max_cmds_per_connection is ontaps3_svm_max_cmds_per_connection aggregated by svm.

    @@ -58875,32 +64695,20 @@

    svm_nfs_secinfo_no_name_totalUnit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42secinfo_no_name.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1secinfo_no_name_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_servermaximum_commands_per_connection
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2secinfo_no_name_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_servermax_cmds_per_connection
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_secinfo_total

    -

    Total number of SECINFO procedures

    +

    svm_ontaps3_svm_max_connected_connections

    +

    Maximum number of object store server connections established at one time. svm_ontaps3_svm_max_connected_connections is ontaps3_svm_max_connected_connections aggregated by svm.

    @@ -58913,44 +64721,46 @@

    svm_nfs_secinfo_totalUnit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41secinfo.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_servermaximum_connected_connections
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42secinfo.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_servermax_connected_connections
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_max_requests_outstanding

    +

    Maximum number of object store server requests in process at one time. svm_ontaps3_svm_max_requests_outstanding is ontaps3_svm_max_requests_outstanding aggregated by svm.

    ZAPIperf-object-get-instances nfsv4secinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1secinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_servermaximum_requests_outstanding
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2secinfo_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_servermax_requests_outstanding
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_sequence_avg_latency

    -

    Average latency of SEQUENCE operations.

    +

    svm_ontaps3_svm_multi_delete_reqs

    +

    Total number of object store server multiple object delete requests. svm_ontaps3_svm_multi_delete_reqs is ontaps3_svm_multi_delete_reqs aggregated by svm.

    @@ -58963,32 +64773,20 @@

    svm_nfs_sequence_avg_latencyUnit: microsec
    Type: average
    Base: sequence.total -

    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42sequence.average_latency
    Unit: microsec
    Type: average
    Base: sequence.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1sequence_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: sequence_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_servermultiple_delete_requests
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2sequence_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: sequence_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_servermulti_delete_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_sequence_total

    -

    Total number of SEQUENCE operations.

    +

    svm_ontaps3_svm_output_flow_control_entry

    +

Number of times output flow control was entered. svm_ontaps3_svm_output_flow_control_entry is ontaps3_svm_output_flow_control_entry aggregated by svm.

    @@ -59001,32 +64799,20 @@

    svm_nfs_sequence_total

    RESTapi/cluster/counter/tables/svm_nfs_v41sequence.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42sequence.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1sequence_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serveroutput_flow_control_entry
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2sequence_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serveroutput_flow_control_entry
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_set_ssv_avg_latency

    -

    Average latency of SET_SSV operations.

    +

    svm_ontaps3_svm_output_flow_control_exit

    +

    Number of times output flow control was exited. svm_ontaps3_svm_output_flow_control_exit is ontaps3_svm_output_flow_control_exit aggregated by svm.

    @@ -59039,32 +64825,20 @@

    svm_nfs_set_ssv_avg_latencyUnit: microsec
    Type: average
    Base: set_ssv.total -

    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42set_ssv.average_latency
    Unit: microsec
    Type: average
    Base: set_ssv.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1set_ssv_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: set_ssv_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serveroutput_flow_control_exit
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2set_ssv_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: set_ssv_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serveroutput_flow_control_exit
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_set_ssv_total

    -

    Total number of SET_SSV operations.

    +

    svm_ontaps3_svm_presigned_url_reqs

    +

    Total number of presigned object store server URL requests. svm_ontaps3_svm_presigned_url_reqs is ontaps3_svm_presigned_url_reqs aggregated by svm.

    @@ -59077,32 +64851,20 @@

    svm_nfs_set_ssv_totalUnit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42set_ssv.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1set_ssv_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverpresigned_url_requests
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2set_ssv_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverpresigned_url_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_setattr_avg_latency

    -

    Average latency of SetAttr procedure requests. The counter keeps track of the average response time of SetAttr requests.

    +

    svm_ontaps3_svm_put_bucket_versioning_failed

    +

    Number of failed Put Bucket Versioning operations. svm_ontaps3_svm_put_bucket_versioning_failed is ontaps3_svm_put_bucket_versioning_failed aggregated by svm.

    @@ -59115,56 +64877,72 @@

    svm_nfs_setattr_avg_latencyUnit: microsec
    Type: average
    Base: setattr.total -

    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverput_bucket_versioning_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4setattr.average_latency
    Unit: microsec
    Type: average
    Base: setattr.total
    conf/restperf/9.12.0/nfsv4.yamlZAPIperf-object-get-instances object_store_serverput_bucket_versioning_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_put_bucket_versioning_total

    +

    Number of Put Bucket Versioning operations. svm_ontaps3_svm_put_bucket_versioning_total is ontaps3_svm_put_bucket_versioning_total aggregated by svm.

    RESTapi/cluster/counter/tables/svm_nfs_v41setattr.average_latency
    Unit: microsec
    Type: average
    Base: setattr.total
    conf/restperf/9.12.0/nfsv4_1.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v42setattr.average_latency
    Unit: microsec
    Type: average
    Base: setattr.total
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_serverput_bucket_versioning_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3setattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverput_bucket_versioning_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_put_data

    +

    Rate of PUT object data transfers per second. svm_ontaps3_svm_put_data is ontaps3_svm_put_data aggregated by svm.

    ZAPIperf-object-get-instances nfsv4setattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1setattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverput_data
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2setattr_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setattr_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverput_data
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_setattr_total

    -

    Total number of Setattr procedure requests. It is the total number of Setattr success and setattr error requests.

    +

    svm_ontaps3_svm_put_object_failed

    +

    Number of failed PUT object operations. svm_ontaps3_svm_put_object_failed is ontaps3_svm_put_object_failed aggregated by svm.

    @@ -59177,56 +64955,72 @@

    svm_nfs_setattr_totalUnit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverput_object_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4setattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yamlZAPIperf-object-get-instances object_store_serverput_object_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_put_object_failed_client_close

    +

Number of times a PUT object operation failed because the client closed the connection while the operation was still pending on the server. svm_ontaps3_svm_put_object_failed_client_close is ontaps3_svm_put_object_failed_client_close aggregated by svm.

    RESTapi/cluster/counter/tables/svm_nfs_v41setattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v42setattr.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_serverput_object_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3setattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverput_object_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_put_object_latency

    +

    Average latency for PUT object operations. svm_ontaps3_svm_put_object_latency is ontaps3_svm_put_object_latency aggregated by svm.

    ZAPIperf-object-get-instances nfsv4setattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1setattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverput_object_latency
    Unit: microsec
    Type: average
    Base: put_object_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2setattr_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlZAPIperf-object-get-instances object_store_serverput_object_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: put_object_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_setclientid_avg_latency

    -

    Average latency of SETCLIENTID procedures

    +

    svm_ontaps3_svm_put_object_rate

    +

    Number of PUT object operations per second. svm_ontaps3_svm_put_object_rate is ontaps3_svm_put_object_rate aggregated by svm.

    @@ -59239,20 +65033,20 @@

    svm_nfs_setclientid_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v4setclientid.average_latency
    Unit: microsec
    Type: average
    Base: setclientid.total
    conf/restperf/9.12.0/nfsv4.yamlapi/cluster/counter/tables/object_store_serverput_object_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4setclientid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setclientid_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlperf-object-get-instances object_store_serverput_object_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_setclientid_confirm_avg_latency

    -

    Average latency of SETCLIENTID_CONFIRM procedures

    +

    svm_ontaps3_svm_put_object_tagging_failed

    +

    Number of failed PUT object tagging operations. svm_ontaps3_svm_put_object_tagging_failed is ontaps3_svm_put_object_tagging_failed aggregated by svm.

    @@ -59265,20 +65059,20 @@

svm_nfs_setclientid_confirm_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v4setclientid_confirm.average_latency
    Unit: microsec
    Type: average
    Base: setclientid_confirm.total
    conf/restperf/9.12.0/nfsv4.yamlapi/cluster/counter/tables/object_store_serverput_object_tagging_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4setclientid_confirm_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: setclientid_confirm_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlperf-object-get-instances object_store_serverput_object_tagging_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_setclientid_confirm_total

    -

    Total number of SETCLIENTID_CONFIRM procedures

    +

    svm_ontaps3_svm_put_object_tagging_failed_client_close

    +

Number of times a PUT object tagging operation failed because the client terminated the connection while the operation was still pending on the server. svm_ontaps3_svm_put_object_tagging_failed_client_close is ontaps3_svm_put_object_tagging_failed_client_close aggregated by svm.

    @@ -59291,20 +65085,20 @@

    svm_nfs_setclientid_confirm_total

    RESTapi/cluster/counter/tables/svm_nfs_v4setclientid_confirm.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yamlapi/cluster/counter/tables/object_store_serverput_object_tagging_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4setclientid_confirm_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlperf-object-get-instances object_store_serverput_object_tagging_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_setclientid_total

    -

    Total number of SETCLIENTID procedures

    +

    svm_ontaps3_svm_put_object_tagging_latency

    +

    Average latency for PUT object tagging operations. svm_ontaps3_svm_put_object_tagging_latency is ontaps3_svm_put_object_tagging_latency aggregated by svm.

    @@ -59317,20 +65111,20 @@

    svm_nfs_setclientid_totalUnit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv4.yamlapi/cluster/counter/tables/object_store_serverput_object_tagging_latency
    Unit: microsec
    Type: average
    Base: put_object_tagging_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4setclientid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlperf-object-get-instances object_store_serverput_object_tagging_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: put_object_tagging_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
-

svm_nfs_symlink_avg_latency

-

    Average latency of SymLink procedure requests. The counter keeps track of the average response time of SymLink requests.

    +

    svm_ontaps3_svm_put_object_tagging_rate

    +

    Number of PUT object tagging operations per second. svm_ontaps3_svm_put_object_tagging_rate is ontaps3_svm_put_object_tagging_rate aggregated by svm.

@@ -59343,20 +65137,20 @@
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverput_object_tagging_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3symlink_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: symlink_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverput_object_tagging_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
-

svm_nfs_symlink_total

-

Total number of SymLink procedure requests. It is the total number of SymLink success and SymLink error requests.

    +

    svm_ontaps3_svm_put_object_tagging_total

    +

    Number of PUT object tagging operations. svm_ontaps3_svm_put_object_tagging_total is ontaps3_svm_put_object_tagging_total aggregated by svm.

@@ -59369,20 +65163,20 @@
    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverput_object_tagging_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3symlink_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverput_object_tagging_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_test_stateid_avg_latency

    -

    Average latency of TEST_STATEID operations.

    +

    svm_ontaps3_svm_put_object_total

    +

    Number of PUT object operations. svm_ontaps3_svm_put_object_total is ontaps3_svm_put_object_total aggregated by svm.

    @@ -59395,32 +65189,20 @@

    svm_nfs_test_stateid_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v41test_stateid.average_latency
    Unit: microsec
    Type: average
    Base: test_stateid.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42test_stateid.average_latency
    Unit: microsec
    Type: average
    Base: test_stateid.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1test_stateid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: test_stateid_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverput_object_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2test_stateid_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: test_stateid_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverput_object_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_test_stateid_total

    -

    Total number of TEST_STATEID operations.

    +

    svm_ontaps3_svm_request_parse_errors

    +

    Number of request parser errors due to malformed requests. svm_ontaps3_svm_request_parse_errors is ontaps3_svm_request_parse_errors aggregated by svm.

    @@ -59433,32 +65215,20 @@

    svm_nfs_test_stateid_totalUnit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42test_stateid.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1test_stateid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverrequest_parse_errors
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2test_stateid_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverrequest_parse_errors
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_throughput

    -

    Rate of NFSv3 data transfers per second.

    +

    svm_ontaps3_svm_requests

    +

    Total number of object store server requests. svm_ontaps3_svm_requests is ontaps3_svm_requests aggregated by svm.

    @@ -59471,56 +65241,20 @@

    svm_nfs_throughputUnit: b_per_sec
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4total.throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41total.write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42total.write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3nfsv3_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4nfs4_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1nfs41_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverrequests
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2nfs42_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverrequests
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_verify_avg_latency

    -

    Average latency of VERIFY procedures

    +

    svm_ontaps3_svm_requests_outstanding

    +

    Number of object store server requests in process. svm_ontaps3_svm_requests_outstanding is ontaps3_svm_requests_outstanding aggregated by svm.

    @@ -59533,44 +65267,20 @@

    svm_nfs_verify_avg_latencyUnit: microsec
    Type: average
    Base: verify.total -

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41verify.average_latency
    Unit: microsec
    Type: average
    Base: verify.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42verify.average_latency
    Unit: microsec
    Type: average
    Base: verify.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4verify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: verify_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1verify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: verify_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverrequests_outstanding
    Unit: none
    Type: raw
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2verify_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: verify_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverrequests_outstanding
    Unit: none
    Type: raw,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_verify_total

    -

    Total number of VERIFY procedures

    +

    svm_ontaps3_svm_root_user_access

    +

Number of times access was performed by the root user. svm_ontaps3_svm_root_user_access is ontaps3_svm_root_user_access aggregated by svm.

    @@ -59583,44 +65293,20 @@

    svm_nfs_verify_totalUnit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41verify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42verify.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4verify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1verify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverroot_user_access
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2verify_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverroot_user_access
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_want_delegation_avg_latency

    -

    Average latency of WANT_DELEGATION operations.

    +

    svm_ontaps3_svm_server_connection_close

    +

    Number of connection closes triggered by server due to fatal errors. svm_ontaps3_svm_server_connection_close is ontaps3_svm_server_connection_close aggregated by svm.

    @@ -59633,32 +65319,20 @@

    svm_nfs_want_delegation_avg_latency

    RESTapi/cluster/counter/tables/svm_nfs_v41want_delegation.average_latency
    Unit: microsec
    Type: average
    Base: want_delegation.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42want_delegation.average_latency
    Unit: microsec
    Type: average
    Base: want_delegation.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1want_delegation_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: want_delegation_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serverserver_connection_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2want_delegation_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: want_delegation_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverserver_connection_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_want_delegation_total

    -

    Total number of WANT_DELEGATION operations.

    +

    svm_ontaps3_svm_signature_v2_reqs

    +

    Total number of object store server signature V2 requests. svm_ontaps3_svm_signature_v2_reqs is ontaps3_svm_signature_v2_reqs aggregated by svm.

    @@ -59671,32 +65345,20 @@

    svm_nfs_want_delegation_totalUnit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42want_delegation.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv4_1want_delegation_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serversignature_v2_requests
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2want_delegation_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serversignature_v2_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_write_avg_latency

    -

    Average latency of Write procedure requests. The counter keeps track of the average response time of Write requests.

    +

    svm_ontaps3_svm_signature_v4_reqs

    +

    Total number of object store server signature V4 requests. svm_ontaps3_svm_signature_v4_reqs is ontaps3_svm_signature_v4_reqs aggregated by svm.

    @@ -59709,56 +65371,20 @@

    svm_nfs_write_avg_latencyUnit: microsec
    Type: average
    Base: write.total -

    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4write.average_latency
    Unit: microsec
    Type: average
    Base: write.total
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41write.average_latency
    Unit: microsec
    Type: average
    Base: write.total
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42write.average_latency
    Unit: microsec
    Type: average
    Base: write.total
    conf/restperf/9.12.0/nfsv4_2.yaml
    ZAPIperf-object-get-instances nfsv3write_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.8.0/nfsv3.yaml
    ZAPIperf-object-get-instances nfsv4write_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.8.0/nfsv4.yaml
    ZAPIperf-object-get-instances nfsv4_1write_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlapi/cluster/counter/tables/object_store_serversignature_v4_requests
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2write_avg_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: write_total
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serversignature_v4_reqs
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_write_ops

    -

    Total observed NFSv3 write operations per second.

    +

    svm_ontaps3_svm_tagging

    +

    Number of requests with tagging specified. svm_ontaps3_svm_tagging is ontaps3_svm_tagging aggregated by svm.

    @@ -59771,20 +65397,20 @@

    svm_nfs_write_opsUnit: per_sec
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_servertagging
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3nfsv3_write_ops
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_servertagging
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_write_throughput

    -

    Rate of NFSv3 write data transfers per second.

    +

    svm_ontaps3_svm_upload_part_failed

    +

    Number of failed Upload Part operations. svm_ontaps3_svm_upload_part_failed is ontaps3_svm_upload_part_failed aggregated by svm.

    @@ -59797,56 +65423,46 @@

    svm_nfs_write_throughput

    RESTapi/cluster/counter/tables/svm_nfs_v3write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv3.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4total.write_throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v41total.throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v42total.throughput
    Unit: b_per_sec
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_serverupload_part_failed
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3nfsv3_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverupload_part_failed
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_upload_part_failed_client_close

    +

Number of times an Upload Part operation failed because the client terminated the connection while the operation was still pending on the server. svm_ontaps3_svm_upload_part_failed_client_close is ontaps3_svm_upload_part_failed_client_close aggregated by svm.

    ZAPIperf-object-get-instances nfsv4nfs4_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1nfs41_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverupload_part_failed_client_close
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2nfs42_write_throughput
    Unit: b_per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverupload_part_failed_client_close
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    -

    svm_nfs_write_total

    -

    Total number of Write procedure requests. It is the total number of write success and write error requests.

    +

    svm_ontaps3_svm_upload_part_latency

    +

    Average latency for Upload Part operations. svm_ontaps3_svm_upload_part_latency is ontaps3_svm_upload_part_latency aggregated by svm.

    @@ -59859,51 +65475,67 @@

    svm_nfs_write_totalUnit: none
    Type: rate
    Base: -

    conf/restperf/9.12.0/nfsv3.yamlapi/cluster/counter/tables/object_store_serverupload_part_latency
    Unit: microsec
    Type: average
    Base: upload_part_total
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    RESTapi/cluster/counter/tables/svm_nfs_v4write.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4.yamlZAPIperf-object-get-instances object_store_serverupload_part_latency
    Unit: microsec
    Type: average,no-zero-values
    Base: upload_part_latency_base
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_upload_part_rate

    +

    Number of Upload Part operations per second. svm_ontaps3_svm_upload_part_rate is ontaps3_svm_upload_part_rate aggregated by svm.

    RESTapi/cluster/counter/tables/svm_nfs_v41write.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_1.yamlAPIEndpointMetricTemplate
    RESTapi/cluster/counter/tables/svm_nfs_v42write.total
    Unit: none
    Type: rate
    Base:
    conf/restperf/9.12.0/nfsv4_2.yamlapi/cluster/counter/tables/object_store_serverupload_part_rate
    Unit: per_sec
    Type: rate
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv3write_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv3.yamlperf-object-get-instances object_store_serverupload_part_rate
    Unit: per_sec
    Type: rate,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
    +

    svm_ontaps3_svm_upload_part_total

    +

    Number of Upload Part operations. svm_ontaps3_svm_upload_part_total is ontaps3_svm_upload_part_total aggregated by svm.

    ZAPIperf-object-get-instances nfsv4write_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4.yamlAPIEndpointMetricTemplate
    ZAPIperf-object-get-instances nfsv4_1write_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.8.0/nfsv4_1.yamlRESTapi/cluster/counter/tables/object_store_serverupload_part_total
    Unit: none
    Type: delta
    Base:
    conf/restperf/9.14.1/ontap_s3_svm.yaml
    ZAPIperf-object-get-instances nfsv4_2write_total
    Unit: none
    Type: rate
    Base:
    conf/zapiperf/cdot/9.11.0/nfsv4_2.yamlperf-object-get-instances object_store_serverupload_part_total
    Unit: none
    Type: delta,no-zero-values
    Base:
    conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml
diff --git a/nightly/plugins/index.html b/nightly/plugins/index.html
index 83de2f528..2ab797f67 100644
--- a/nightly/plugins/index.html
+++ b/nightly/plugins/index.html
@@ -1713,6 +1713,63 @@
+   • VolumeTopClients
@@ -2262,6 +2319,63 @@
+   • VolumeTopClients
@@ -2853,6 +2967,27 @@

Object Deletion

Viewing the Metrics

    You can view the metrics published by the ChangeLog plugin in the ChangeLog Monitor dashboard in Grafana. This dashboard provides a visual representation of the changes tracked by the plugin for volume, svm, and node objects.

    +

    VolumeTopClients

    +

The VolumeTopClients plugin tracks the top clients of volumes in terms of read and write IOPS, as well as read and write throughput. This plugin is available only through the RestPerf Collector in ONTAP version 9.12 and later.

    +

    Enabling the Plugin

    +

    Top Clients collection is disabled by default. To enable Top Clients tracking in Harvest, follow these steps:

    +
+

1. Ensure you are using ONTAP version 9.12 or later.
2. Enable the Top Clients collection in the RestPerf Collector Volume template via the VolumeTopClients plugin.

    For detailed steps on how to enable the plugin, refer to the discussion here.
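As a rough illustration of step 2 (the exact keys and surrounding template contents vary by Harvest release, so treat this as a sketch rather than the authoritative template), the plugin is enabled by listing it in the plugins section of the RestPerf volume object template:

```yaml
# Illustrative excerpt of a RestPerf volume template (e.g. conf/restperf/9.12.0/volume.yaml)
# Only the plugin entry is the point here; the other keys are abbreviated placeholders.
name:   Volume
query:  api/cluster/counter/tables/volume
object: volume

plugins:
  - VolumeTopClients      # enables Top Clients collection (off by default)
```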

    +

    Configuration Parameters

    +

    max_volumes

    +

    The max_volumes parameter specifies the maximum number of top volumes to track. By default, this value is set to 5, but it can be configured up to a maximum of 50.
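A hedged sketch of how max_volumes might be set alongside the plugin entry; the nesting shown here is an assumption, so check your template for the exact form:

```yaml
plugins:
  - VolumeTopClients:
      max_volumes: 10   # default is 5; the documented upper limit is 50
```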

    +

In each performance poll, the plugin selects the top volumes by ranking them in descending order of read IOPS, write IOPS, read throughput, and write throughput. This means that during each performance poll, the plugin will:

    +
+

1. Collect the read IOPS, write IOPS, read throughput, and write throughput for all volumes.
2. Sort the volumes in descending order based on their metric values.
3. Select the top volumes as specified by max_volumes.
4. Collect top clients metrics for these volumes.

    Viewing the Metrics

    +

    You can view the metrics published by the VolumeTopClients plugin in the Volume dashboard under the Top Clients row in Grafana.

diff --git a/nightly/search/search_index.json b/nightly/search/search_index.json
index 439723c1d..929f99d73 100644
--- a/nightly/search/search_index.json
+++ b/nightly/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"What is Harvest?","text":"

    Harvest is the open-metrics endpoint for ONTAP and StorageGRID

    NetApp Harvest brings observability to ONTAP and StorageGRID clusters. Harvest collects performance, capacity and hardware metrics from ONTAP and StorageGRID, transforms them, and routes them to your choice of a time-series database.

    The included Grafana dashboards deliver the datacenter insights you need, while new metrics can be collected with a few edits of the included template files.

    Harvest is open-source, released under an Apache2 license, and offers great flexibility in how you collect, augment, and export your datacenter metrics.

Out of the box, Harvest provides a set of pollers, collectors, templates, exporters, an optional auto-discover daemon, and a set of StorageGRID and ONTAP dashboards for Prometheus and Grafana. Harvest collects the metrics and makes them available to separately installed instances of Prometheus/InfluxDB and Grafana.

    • Concepts
    • Quickstart Guide

    If you'd like to familiarize yourself with Harvest's core concepts, we recommend reading concepts.

    If you feel comfortable with the concepts, we recommend our quickstart guide, which takes you through a practical example.

    Note

Hop onto our Discord or GitHub discussions and say hi. 👋🏽

    "},{"location":"MigratePrometheusDocker/","title":"Migrate Prometheus Docker Volume","text":"

    If you want to keep your historical Prometheus data, and you generated your harvest-compose.yml file via bin/harvest generate before Harvest 22.11, please follow the steps below to migrate your historical Prometheus data.

    This is not required if you generated your harvest-compose.yml file via bin/harvest generate at Harvest release 22.11 or after.

Outline of steps:

1. Stop the Prometheus container so its data quiesces
2. Find the historical Prometheus volume
3. Create a new Prometheus volume that Harvest 22.11 and later will use
4. Copy the historical Prometheus data from the old volume to the new one
5. Optionally remove the historical Prometheus volume

    "},{"location":"MigratePrometheusDocker/#stop-prometheus-container","title":"Stop Prometheus container","text":"

It's safe to run the stop and rm commands below regardless of whether Prometheus is running, since removing the container does not touch the historical data stored in the volume.

    Stop all containers named Prometheus and remove them.

docker stop $(docker ps -fname=prometheus -q) && docker rm $(docker ps -a -fname=prometheus -q)\n

    Docker may complain if the container is not running, like so. You can ignore this.

    Ignorable output when container is not running (click me)
    \"docker stop\" requires at least 1 argument.\nSee 'docker stop --help'.\n\nUsage:  docker stop [OPTIONS] CONTAINER [CONTAINER...]\n\nStop one or more running containers\n
    "},{"location":"MigratePrometheusDocker/#find-the-name-of-the-prometheus-volume-that-has-the-historical-data","title":"Find the name of the Prometheus volume that has the historical data","text":"
    docker volume ls -f name=prometheus -q\n

    Output should look like this:

    harvest-22080-1_linux_amd64_prometheus_data  # historical Prometheus data here\nharvest_prometheus_data                      # it is fine if this line is missing\n

    We want to copy the historical data from harvest-22080-1_linux_amd64_prometheus_data to harvest_prometheus_data

    If harvest_prometheus_data already exists, you need to decide if you want to move that volume's data to a different volume or remove it. If you want to remove the volume, run docker volume rm harvest_prometheus_data. If you want to move the data, adjust the command below to first copy harvest_prometheus_data to a different volume and then remove it.

    "},{"location":"MigratePrometheusDocker/#create-new-prometheus-volume","title":"Create new Prometheus volume","text":"

We're going to create a new volume named harvest_prometheus_data by executing:

    docker volume create --name harvest_prometheus_data\n
    "},{"location":"MigratePrometheusDocker/#copy-the-historical-prometheus-data","title":"Copy the historical Prometheus data","text":"

We will copy the historical Prometheus data from the old volume to the new one by mounting both volumes and copying data between them. NOTE: Prometheus only supports data from a single volume. It will not work if you attempt to copy multiple volumes into the same destination volume.

# replace `HISTORICAL_VOLUME` with the name of the Prometheus volume that contains your historical data found in step 2.\ndocker run --rm -it -v $HISTORICAL_VOLUME:/from -v harvest_prometheus_data:/to alpine ash -c \"cd /from ; cp -av . /to\"\n

    Output will look something like this:

    './wal' -> '/to/./wal'\n'./wal/00000000' -> '/to/./wal/00000000'\n'./chunks_head' -> '/to/./chunks_head'\n...\n
    "},{"location":"MigratePrometheusDocker/#optionally-remove-historical-prometheus-data","title":"Optionally remove historical Prometheus data","text":"

    Before removing the historical data, start your compose stack and make sure everything works.

    Once you're satisfied that you can destroy the old data, remove it like so.

    # replace `HISTORICAL_VOLUME` with the name of the Prometheus volume that contains your historical data found in step 2.\ndocker volume rm $HISTORICAL_VOLUME\n
    "},{"location":"MigratePrometheusDocker/#reference","title":"Reference","text":"
    • Rename Docker Volume
    "},{"location":"concepts/","title":"Concepts","text":"

    In order to understand how Harvest works, it's important to understand the following concepts:

    • Poller
    • Collectors
    • Templates
    • Exporters
    • Dashboards
    • Port Map

    In addition to the above concepts, Harvest uses the following software that you will want to be familiar with:

    • Prometheus
    • InfluxDB
    • Dashboards
    • Prometheus Auto-discover
    • Docker
    • NABox
    "},{"location":"concepts/#poller","title":"Poller","text":"

    The poller is the resident daemon process that coordinates the collectors and exporters. There will be one poller per monitored cluster.
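For orientation, here is a minimal harvest.yml sketch with one poller per cluster; the cluster name, address, and exporter name below are placeholders:

```yaml
# Illustrative Pollers section of harvest.yml: one entry per monitored cluster
Pollers:
  cluster-01:
    datacenter: dc-east      # free-form label attached to this poller's metrics
    addr: 10.0.1.10          # management address of the cluster
    collectors:
      - Rest
      - RestPerf
    exporters:
      - prometheus1          # must match an entry in the Exporters section
```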

    "},{"location":"concepts/#collectors","title":"Collectors","text":"

    Collectors implement the necessary protocol required to speak to the cluster. Harvest ships with ZAPI, REST, EMS, and StorageGRID collectors. Collectors use a set of per-object template files to determine which metrics to collect.

    More information:

    • Configuring Collectors
    "},{"location":"concepts/#templates","title":"Templates","text":"

    Templates define which metrics should be collected for an object (e.g. volume, lun, SVM, etc.). Harvest ships with a set of templates for each collector. The templates are written in YAML and are straightforward to read and modify. The templates are located in the conf directory.

    There are two kinds of templates:

    "},{"location":"concepts/#collector-templates","title":"Collector Templates","text":"

    Collector templates (e.g. conf/rest/default.yaml) define which set of objects Harvest should collect from the system being monitored when that collector runs. For example, the conf/rest/default.yaml collector template defines which objects should be collected by the REST collector, while conf/storagegrid/default.yaml lists which objects should be collected by the StorageGRID collector.
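In other words, a collector template is mostly a schedule plus a map of object names to object template files. The sketch below follows the spirit of conf/rest/default.yaml; the object list is abbreviated and illustrative:

```yaml
# Illustrative collector template: which objects the REST collector gathers
schedule:
  - data: 3m                 # how often the collector polls
objects:
  Volume:     volume.yaml
  Disk:       disk.yaml
  Aggregate:  aggr.yaml
```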

    "},{"location":"concepts/#object-templates","title":"Object Templates","text":"

    Object templates (e.g. conf/rest/9.12.0/disk.yaml) define which metrics should be collected and exported for an object. For example, the disk.yaml object template defines which disk metrics should be collected (e.g. disk_bytes_per_sector, disk_stats_average_latency, disk_uptime, etc.)

    More information:

    • Templates
    • Templates and Metrics
    "},{"location":"concepts/#exporters","title":"Exporters","text":"

    Exporters are responsible for encoding the collected metrics and making them available to time-series databases. Harvest ships with Prometheus and InfluxDB exporters. Harvest does not include Prometheus and InfluxDB, only the exporters for them. Prometheus and InfluxDB must be installed separately via Docker, NAbox, or other means.

    "},{"location":"concepts/#prometheus","title":"Prometheus","text":"

    Prometheus is an open-source time-series database. It is a popular choice for storing and querying metrics.

    Don't call us, we'll call you

    None of the pollers know anything about Prometheus. That's because Prometheus pulls metrics from the poller's Prometheus exporter. The exporter creates an HTTP(s) endpoint that Prometheus scrapes on its own schedule.

    More information:

    • Prometheus Exporter
    "},{"location":"concepts/#influxdb","title":"InfluxDB","text":"

    InfluxDB is an open-source time-series database. Harvest ships with some sample Grafana dashboards that are designed to work with InfluxDB. Unlike the Prometheus exporter, Harvest's InfluxDB exporter pushes metrics from the poller to InfluxDB via InfluxDB's line protocol. The exporter is compatible with InfluxDB v2.0.

    Note

    Harvest includes a subset of dashboards for InfluxDB. There is a richer set of dashboards available for Prometheus.

    More information:

    • InfluxDB Exporter
    "},{"location":"concepts/#dashboards","title":"Dashboards","text":"

    Harvest ships with a set of Grafana dashboards that are primarily designed to work with Prometheus. The dashboards are located in the grafana/dashboards directory. Harvest does not include Grafana, only the dashboards for it. Grafana must be installed separately via Docker, NAbox, or other means.

    Harvest includes CLI tools to import and export dashboards to Grafana. The CLI tools are available by running bin/harvest grafana --help

    More information:

    • Import or Export Dashboards
    • How to Create A New Dashboard
    "},{"location":"concepts/#prometheus-auto-discovery","title":"Prometheus Auto-Discovery","text":"

    Because of Prometheus's pull model, you need to configure Prometheus to tell it where to pull metrics from. There are two ways to tell Prometheus how to scrape Harvest: 1) listing each poller's address and port individually in Prometheus's config file or 2) using HTTP service discovery.

    Harvest's admin node implements Prometheus's HTTP service discovery API. Each poller registers its address and port with the admin node and Prometheus consults with the admin node for the list of targets it should scrape.
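
    As a hedged illustration of these two approaches, the prometheus.yml snippet below shows one statically listed poller and one HTTP service discovery entry pointing at the admin node. The host names, ports, and the service-discovery URL path are placeholders; consult the links below for the exact values your setup needs.

    scrape_configs:\n  # approach 1: list each poller's address and port statically\n  - job_name: harvest-static\n    static_configs:\n      - targets: ['poller1-host:12990']   # placeholder host:port\n  # approach 2: ask the Harvest admin node for the current list of pollers\n  - job_name: harvest-sd\n    http_sd_configs:\n      - url: http://admin-node-host:8887/api/v1/sd   # placeholder path\n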

    More information:

    • Configure Prometheus to scrape Harvest pollers
    • Prometheus Admin Node
    • Prometheus HTTP Service Discovery
    "},{"location":"concepts/#docker","title":"Docker","text":"

    Harvest runs natively in containers. The Harvest container includes the harvest and poller binaries as well as all templates and dashboards. If you want to stand up Harvest, Prometheus, and Grafana all together, you can use the Docker Compose workflow. The Docker Compose workflow is a good way to quickly get started with Harvest.

    More information:

    • Running Harvest in Docker
    • Running Harvest, Prometheus, and Grafana in Docker
    "},{"location":"concepts/#nabox","title":"NABox","text":"

    NABox is a separate virtual appliance (.ova) that acts as a front-end to Harvest and includes Prometheus and Grafana set up to use with Harvest. NABox is a great option for customers who prefer a virtual appliance over containers.

    More information:

    • NABox
    "},{"location":"concepts/#port-map","title":"Port Map","text":"

    The default ports for ONTAP, Grafana, and Prometheus are shown below, along with three pollers. Poller1 is using the PrometheusExporter with a statically defined port in harvest.yml. Poller2 and Poller3 are using Harvest's admin node, port range, and Prometheus HTTP service discovery.

    graph LR\n  Poller1 -->|:443|ONTAP1;\n  Prometheus -->|:promPort1|Poller1;\n  Prometheus -->|:promPort2|Poller2;\n  Prometheus -->|:promPort3|Poller3;\n  Prometheus -->|:8887|AdminNode;\n\n  Poller2 -->|:443|ONTAP2;\n  AdminNode <-->|:8887|Poller3;\n  Poller3 -->|:443|ONTAP3;\n  AdminNode <-->|:8887|Poller2;\n\n  Grafana -->|:9090|Prometheus;\n  Browser -->|:3000|Grafana;
    • Grafana's default port is 3000 and is used to access the Grafana user-interface via a web browser
    • Prometheus's default port is 9090 and Grafana talks to the Prometheus datasource on that port
    • Prometheus scrapes each poller-exposed Prometheus port (promPort1, promPort2, promPort3)
    • Poller2 and Poller3 are configured to use a PrometheusExporter with port range. Each poller picks a free port within the port_range and sends that port to the AdminNode.
    • The Prometheus config file, prometheus.yml, is updated with two scrape targets:

      1. the static address:port for Poller1
      2. the address:port for the AdminNode
    • Poller1 creates an HTTP endpoint on the static port defined in the harvest.yml file

    • All pollers use ZAPI or REST to communicate with ONTAP on port 443
    "},{"location":"concepts/#reference","title":"Reference","text":"
    • Architecture.md
    "},{"location":"configure-ems/","title":"EMS","text":""},{"location":"configure-ems/#ems-collector","title":"EMS collector","text":"

    The EMS collector collects ONTAP event management system (EMS) events via the ONTAP REST API.

    The EMS alert runbook includes descriptions and remediation steps for the EMS events that Harvest collects.

    This collector uses a YAML template file to define which events to collect, export, and what labels to attach to each metric. This means you can collect new EMS events or attach new labels by editing the default template file or by extending existing templates. Events that occurred when the EMS collector was not running will not be captured.

    The default template file contains 98 EMS events.

    "},{"location":"configure-ems/#supported-ontap-systems","title":"Supported ONTAP Systems","text":"

    Any cDOT ONTAP system using 9.6 or higher.

    "},{"location":"configure-ems/#requirements","title":"Requirements","text":"

    It is recommended to create a read-only user on the ONTAP system. See prepare an ONTAP cDOT cluster for details.

    "},{"location":"configure-ems/#metrics","title":"Metrics","text":"

    This collector collects EMS events from ONTAP and, for each received EMS event, creates new metrics prefixed with ems_events.

    Harvest supports two types of ONTAP EMS events:

    • Normal EMS events

    Single shot events. When ONTAP detects a problem, an event is raised. When the issue is addressed, ONTAP does not raise another event reflecting that the problem was resolved.

    • Bookend EMS events

    ONTAP creates bookend events in matching pairs. ONTAP creates an event when an issue is detected and another paired event when the event is resolved. Typically, these events share a common set of properties.

    "},{"location":"configure-ems/#collector-configuration","title":"Collector Configuration","text":"

    The parameters of the collector are distributed across three files:

    • Harvest configuration file (default: harvest.yml)
    • EMS collector configuration file (default: conf/ems/default.yaml)
    • EMS template file (located in conf/ems/9.6.0/ems.yaml)

    Except for addr, datacenter, and auth_style, all other parameters of the EMS collector can be defined in any of these three files. Parameters defined in lower-level files override parameters in higher-level files. This allows you to configure each EMS event individually or use the same parameters for all events.

    "},{"location":"configure-ems/#ems-collector-configuration-file","title":"EMS Collector Configuration File","text":"

    This configuration file contains the parameters that are used to configure the EMS collector. These parameters can be defined in your harvest.yml or conf/ems/default.yaml file.

    parameter type description default client_timeout Go duration how long to wait for server responses 1m schedule list, required the polling frequency of the collector/object. Should include exactly the following two elements in the order specified: - instance Go duration polling frequency for updating the instance cache (example value: 24h = 1440m) - data Go duration polling frequency for updating the data cache (example value: 3m) Note Harvest allows defining poll intervals on sub-second level (e.g. 1ms), however keep in mind the following:
    • API response of an ONTAP system can take several seconds, so the collector is likely to enter failed state if the poll interval is less than client_timeout.
    • Small poll intervals will create significant workload on the ONTAP system.
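
    As a hedged example, the client_timeout and schedule parameters described above could be set in conf/ems/default.yaml (or under the collector in harvest.yml) like this; the values simply mirror the defaults and examples from the table.

    client_timeout: 1m\nschedule:\n  - instance: 24h\n  - data: 3m\n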

    The EMS configuration file should contain the following section mapping the Ems object to the corresponding template file.

    objects:\n  Ems: ems.yaml\n

    Even though the EMS mapping shown above references a single file named ems.yaml, there may be multiple versions of that file across subdirectories named after ONTAP releases. See cDOT for examples.

    At runtime, the EMS collector will select the appropriate object configuration file that most closely matches the targeted ONTAP system.

    "},{"location":"configure-ems/#ems-template-file","title":"EMS Template File","text":"

    The EMS template file should contain the following parameters:

    parameter type description default name string display name of the collector. this matches the name defined in your conf/ems/default.yaml file EMS object string short name of the object, used to prefix metrics ems query string REST API endpoint used to query EMS events api/support/ems/events exports list list of default labels attached to each exported metric events list list of EMS events to collect. See Event Parameters"},{"location":"configure-ems/#event-parameters","title":"Event Parameters","text":"

    This section defines the list of EMS events you want to collect, which properties to export, what labels to attach, and how to handle bookend pairs. The EMS event template parameters are explained below along with an example for reference.

    • name is the ONTAP EMS event name. (collect ONTAP EMS events with the name of LUN.offline)
    • matches list of name-value pairs used to further filter ONTAP events. Some EMS events include arguments and these name-value pairs provide a way to filter on those arguments. (Only collect ONTAP EMS events where volume_name has the value abc_vol)
    • exports list of EMS event parameters to export. These exported parameters are attached as labels to each matching EMS event.
      • labels that are prefixed with ^^ use that parameter to define instance uniqueness.
    • resolve_when_ems (applicable to bookend events only). Lists the resolving event that pairs with the issuing event.
      • name is the ONTAP EMS event name of the resolving EMS event (LUN.online). When the resolving event is received, the issuing EMS event will be resolved. In this example, Harvest will raise an event when it finds the ONTAP EMS event named LUN.offline and that event will be resolved when the EMS event named LUN.online is received.
      • resolve_after (optional, Go duration, default = 28 days) resolve the issuing EMS after the specified duration has elapsed (672h = 28d). If the bookend pair is not received within the resolve_after duration, the Issuing EMS event expires. When that happens, Harvest will mark the event as auto resolved by adding the autoresolved=true label to the issuing EMS event.
      • resolve_key (optional) bookend key used to match bookend EMS events. Defaults to prefixed (^^) labels in exports section. resolve_key allows you to override what is defined in the exports section.

    Labels are only exported if they are included in the exports section.

    Example template definition for the LUN.offline EMS event:

      - name: LUN.offline\n    matches:\n      - name: volume_name\n        value: abc_vol\n    exports:\n      - ^^parameters.object_uuid            => object_uuid\n      - parameters.object_type              => object_type\n      - parameters.lun_path                 => lun_path\n      - parameters.volume_name              => volume\n      - parameters.volume_dsid              => volume_ds_id\n    resolve_when_ems:\n      - name: LUN.online\n        resolve_after: 672h\n        resolve_key:\n          - ^^parameters.object_uuid        => object_uuid\n
    "},{"location":"configure-ems/#how-do-i-find-the-full-list-of-supported-ems-events","title":"How do I find the full list of supported EMS events?","text":"

    ONTAP documents the list of EMS events created in the ONTAP EMS Event Catalog.

    You can also query a live system and ask the cluster for its event catalog like so:

    curl --insecure --user \"user:password\" 'https://10.61.124.110/api/support/ems/messages?fields=*'\n

    Example Output

    {\n  \"records\": [\n    {\n      \"name\": \"AccessCache.NearLimits\",\n      \"severity\": \"alert\",\n      \"description\": \"This message occurs when the access cache module is near its limits for entries or export rules. Reaching these limits can prevent new clients from being able to mount and perform I/O on the storage system, and can also cause clients to be granted or denied access based on stale cached information.\",\n      \"corrective_action\": \"Ensure that the number of clients accessing the storage system continues to be below the limits for access cache entries and export rules across those entries. If the set of clients accessing the storage system is constantly changing, consider using the \\\"vserver export-policy access-cache config modify\\\" command to reduce the harvest timeout parameter so that cache entries for clients that are no longer accessing the storage system can be evicted sooner.\",\n      \"snmp_trap_type\": \"severity_based\",\n      \"deprecated\": false\n    },\n...\n    {\n      \"name\": \"ztl.smap.online.status\",\n      \"severity\": \"notice\",\n      \"description\": \"This message occurs when the specified partition on a Software Defined Flash drive could not be onlined due to internal S/W or device error.\",\n      \"corrective_action\": \"NONE\",\n      \"snmp_trap_type\": \"severity_based\",\n      \"deprecated\": false\n    }\n  ],\n  \"num_records\": 7273\n}\n
    "},{"location":"configure-ems/#ems-prometheus-alerts","title":"Ems Prometheus Alerts","text":"

    Refer to Prometheus-Alerts

    "},{"location":"configure-grafana/","title":"Configure Grafana","text":""},{"location":"configure-grafana/#grafana","title":"Grafana","text":"

    Grafana hosts the Harvest dashboards and needs to be set up before importing your dashboards.

    "},{"location":"configure-harvest-advanced/","title":"Configure Harvest (advanced)","text":"

    This chapter describes additional advanced configuration possibilities of NetApp Harvest. For a typical installation, this level of detail is likely not needed.

    "},{"location":"configure-harvest-advanced/#variable-expansion","title":"Variable Expansion","text":"

    The harvest.yml configuration file supports variable expansion. This allows you to use environment variables in the configuration file. Harvest will expand strings with the format $__env{VAR} or ${VAR}, replacing the variable VAR with the value of the environment variable. If the environment variable is not set, the variable will be replaced with an empty string.

    Here's an example snippet from harvest.yml:

    Pollers:\n  netapp_frankfurt:\n    addr: 10.0.1.2\n    username: $__env{NETAPP_FRANKFURT_RO_USER}\n  netapp_london:\n    addr: uk-cluster\n    username: ${NETAPP_LONDON_RO_USER}\n  netapp_rtp:\n    addr: 10.0.1.4\n    username: $__env{NETAPP_RTP_RO_USER}\n

    If you set the environment variable NETAPP_FRANKFURT_RO_USER to harvest1 and NETAPP_LONDON_RO_USER to harvest2, the configuration will be expanded to:

    Pollers:\n  netapp_frankfurt:\n    addr: 10.0.1.2\n    username: harvest1\n  netapp_london:\n    addr: uk-cluster\n    username: harvest2\n  netapp_rtp:\n    addr: 10.0.1.4\n    username: \n
    "},{"location":"configure-harvest-basic/","title":"Configure Harvest (basic)","text":"

    The main configuration file, harvest.yml, consists of the following sections, described below:

    "},{"location":"configure-harvest-basic/#pollers","title":"Pollers","text":"

    All pollers are defined in harvest.yml, the main configuration file of Harvest, under the section Pollers.

    parameter type description default Poller name (header) required Poller name, user-defined value datacenter required Datacenter name, user-defined value addr required by some collectors IPv4, IPv6 or FQDN of the target system collectors required List of collectors to run for this poller exporters required List of exporter names from the Exporters section. Note: this should be the name of the exporter (e.g. prometheus1), not the value of the exporter key (e.g. Prometheus) auth_style required by Zapi* collectors Either basic_auth or certificate_auth See authentication for details basic_auth username, password required if auth_style is basic_auth ssl_cert, ssl_key optional if auth_style is certificate_auth Paths to SSL (client) certificate and key used to authenticate with the target system. If not provided, the poller will look for <hostname>.key and <hostname>.pem in $HARVEST_HOME/cert/. To create certificates for ONTAP systems, see using certificate authentication ca_cert optional if auth_style is certificate_auth Path to file that contains PEM encoded certificates. Harvest will append these certificates to the system-wide set of root certificate authorities (CA). If not provided, the OS's root CAs will be used. To create certificates for ONTAP systems, see using certificate authentication use_insecure_tls optional, bool If true, disable TLS verification when connecting to ONTAP cluster false credentials_file optional, string Path to a yaml file that contains cluster credentials. The file should have the same shape as harvest.yml. See here for examples. Path can be relative to harvest.yml or absolute. credentials_script optional, section Section that defines how Harvest should fetch credentials via external script. See here for details. tls_min_version optional, string Minimum TLS version to use when connecting to ONTAP cluster: One of tls10, tls11, tls12 or tls13 Platform decides labels optional, list of key-value pairs Each of the key-value pairs will be added to a poller's metrics. Details below log_max_bytes Maximum size of the log file before it will be rotated 10 MB log_max_files Number of rotated log files to keep 5 log optional, list of collector names Matching collectors log their ZAPI request/response prefer_zapi optional, bool Use the ZAPI API if the cluster supports it, otherwise allow Harvest to choose REST or ZAPI, whichever is appropriate to the ONTAP version. See rest-strategy for details. conf_path optional, : separated list of directories The search path Harvest uses to load its templates. Harvest walks each directory in order, stopping at the first one that contains the desired template. conf"},{"location":"configure-harvest-basic/#defaults","title":"Defaults","text":"

    This section is optional. If there are parameters identical for all your pollers (e.g., datacenter, authentication method, login preferences), they can be grouped under this section. The poller section will be checked first, and if the values aren't found there, the defaults will be consulted.
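
    For example, a minimal hypothetical harvest.yml could group the shared values under Defaults and let individual pollers override them; the datacenter names, addresses, and exporter name below are illustrative.

    Defaults:\n  datacenter: dc-01\n  collectors:\n    - Rest\n    - RestPerf\n  exporters:\n    - prometheus1\n\nPollers:\n  cluster-01:\n    addr: 10.0.1.1\n  cluster-02:\n    addr: 10.0.1.2\n    datacenter: dc-02   # overrides the Defaults value\n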

    "},{"location":"configure-harvest-basic/#exporters","title":"Exporters","text":"

    All exporters need two types of parameters:

    • exporter parameters - defined in harvest.yml under Exporters section
    • export_options - these options are defined in the Matrix data structure emitted from collectors and plugins

    The following two parameters are required for all exporters:

    parameter type description default Exporter name (header) required Name of the exporter instance, this is a user-defined value exporter required Name of the exporter class (e.g. Prometheus, InfluxDB, Http) - these can be found under the cmd/exporters/ directory

    Note: when we talk about the Prometheus Exporter or InfluxDB Exporter, we mean the Harvest modules that send the data to a database, NOT the names used to refer to the actual databases.
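
    A minimal sketch of an Exporters section, assuming a single Prometheus exporter instance named prometheus1 that pollers reference by that name; the port value is a placeholder.

    Exporters:\n  prometheus1:\n    exporter: Prometheus\n    port: 12990   # placeholder port\n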

    "},{"location":"configure-harvest-basic/#prometheus-exporter","title":"Prometheus Exporter","text":""},{"location":"configure-harvest-basic/#influxdb-exporter","title":"InfluxDB Exporter","text":""},{"location":"configure-harvest-basic/#tools","title":"Tools","text":"

    This section is optional. You can uncomment the grafana_api_token key and add your Grafana API token so harvest does not prompt you for the key when importing dashboards.

    Tools:\n  #grafana_api_token: 'aaa-bbb-ccc-ddd'\n
    "},{"location":"configure-harvest-basic/#poller_files","title":"Poller_files","text":"

    Harvest supports loading pollers from multiple files specified in the Poller_files section of your harvest.yml file. For example, the following snippet tells harvest to load pollers from all the *.yml files under the configs directory, and from the path/to/single.yml file.

    Paths may be relative or absolute.

    Poller_files:\n    - configs/*.yml\n    - path/to/single.yml\n\nPollers:\n    u2:\n        datacenter: dc-1\n

    Each referenced file can contain one or more unique pollers. Ensure that you include the top-level Pollers section in these files. All other top-level sections will be ignored. For example:

    # contents of configs/00-rtp.yml\nPollers:\n  ntap3:\n    datacenter: rtp\n\n  ntap4:\n    datacenter: rtp\n---\n# contents of configs/01-rtp.yml\nPollers:\n  ntap5:\n    datacenter: blr\n---\n# contents of path/to/single.yml\nPollers:\n  ntap1:\n    datacenter: dc-1\n\n  ntap2:\n    datacenter: dc-1\n

    At runtime, all files will be read and combined into a single configuration. The example above would result in the following set of pollers in this order.

    - u2\n- ntap3\n- ntap4\n- ntap5\n- ntap1\n- ntap2\n

    When using glob patterns, the list of matching paths will be sorted before they are read. Errors will be logged for all duplicate pollers and Harvest will refuse to start.

    "},{"location":"configure-harvest-basic/#configuring-collectors","title":"Configuring collectors","text":"

    Collectors are configured by their own configuration files (templates), which are stored in subdirectories in conf/. Most collectors run concurrently and collect a subset of related metrics. For example, node related metrics are grouped together and run independently of the disk-related metrics. Below is a snippet from conf/zapi/default.yaml

    In this example, the default.yaml template contains a list of objects (e.g., Node) that reference sub-templates (e.g., node.yaml). This decomposition groups related metrics together and at runtime, a Zapi collector per object will be created and each of these collectors will run concurrently.

    Using the snippet below, we expect there to be four Zapi collectors running, each with a different subtemplate and object.

    collector:          Zapi\nobjects:\n  Node:             node.yaml\n  Aggregate:        aggr.yaml\n  Volume:           volume.yaml\n  SnapMirror:       snapmirror.yaml\n

    At start-up, Harvest looks for two files (default.yaml and custom.yaml) in the conf directory of the collector (e.g. conf/zapi/default.yaml). The default.yaml is installed by default, while the custom.yaml is an optional file you can create to add new templates.

    When present, the custom.yaml file will be merged with the default.yaml file. This behavior can be overridden in your harvest.yml; see here for an example. A hedged custom.yaml sketch is shown below.
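
    As an example, a conf/zapi/custom.yaml that adds one extra object on top of the default set might look like the following; the object name and sub-template filename are hypothetical.

    objects:\n  Sensor: sensor.yaml\n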

    For a list of collector-specific parameters, refer to their individual documentation.

    "},{"location":"configure-harvest-basic/#zapi-and-zapiperf","title":"Zapi and ZapiPerf","text":""},{"location":"configure-harvest-basic/#rest-and-restperf","title":"Rest and RestPerf","text":""},{"location":"configure-harvest-basic/#ems","title":"EMS","text":""},{"location":"configure-harvest-basic/#storagegrid","title":"StorageGRID","text":""},{"location":"configure-harvest-basic/#unix","title":"Unix","text":""},{"location":"configure-harvest-basic/#labels","title":"Labels","text":"

    Labels offer a way to add additional key-value pairs to a poller's metrics. These allow you to tag a cluster's metrics in a cross-cutting fashion. Here's an example:

      cluster-03:\n    datacenter: DC-01\n    addr: 10.0.1.1\n    labels:\n      - org: meg       # add an org label with the value \"meg\"\n      - ns:  rtp       # add a namespace label with the value \"rtp\"\n

    These settings add two key-value pairs to each metric collected from cluster-03 like this:

    node_vol_cifs_write_data{org=\"meg\",ns=\"rtp\",datacenter=\"DC-01\",cluster=\"cluster-03\",node=\"umeng-aff300-05\"} 10\n

    Keep in mind that each unique combination of key-value pairs increases the amount of stored data. Use them sparingly. See PrometheusNaming for details.

    "},{"location":"configure-harvest-basic/#authentication","title":"Authentication","text":"

    When authenticating with ONTAP and StorageGRID clusters, Harvest supports both client certificates and basic authentication.

    These methods of authentication are defined in the Pollers or Defaults section of your harvest.yml using one or more of the following parameters.

    parameter description default Link auth_style One of basic_auth or certificate_auth Optional when using credentials_file or credentials_script basic_auth link username Username used for authenticating to the remote system link password Password used for authenticating to the remote system link credentials_file Relative or absolute path to a yaml file that contains cluster credentials link credentials_script External script Harvest executes to retrieve credentials link"},{"location":"configure-harvest-basic/#precedence","title":"Precedence","text":"

    When multiple authentication parameters are defined at the same time, Harvest tries each method listed below, in the following order, to resolve authentication requests. The first method that returns a non-empty password stops the search.

    When these parameters exist in both the Pollers and Defaults section, the Pollers section will be consulted before the Defaults.

    section parameter Pollers auth_style: certificate_auth Pollers auth_style: basic_auth with username and password Pollers credentials_script Pollers credentials_file Defaults auth_style: certificate_auth Defaults auth_style: basic_auth with username and password Defaults credentials_script Defaults credentials_file"},{"location":"configure-harvest-basic/#credentials-file","title":"Credentials File","text":"

    If you would rather not list cluster credentials in your harvest.yml, you can use the credentials_file section in your harvest.yml to point to a file that contains the credentials. At runtime, the credentials_file will be read and the included credentials will be used to authenticate with the matching cluster(s).

    This is handy when integrating with 3rd party credential stores. See #884 for examples.

    The format of the credentials_file is similar to harvest.yml and can contain multiple cluster credentials.

    Example:

    Snippet from harvest.yml:

    Pollers:\n  cluster1:\n    addr: 10.193.48.11\n    credentials_file: secrets/cluster1.yml\n    exporters:\n      - prom1 \n

    File secrets/cluster1.yml:

    Pollers:\n  cluster1:\n    username: harvest\n    password: foo\n
    "},{"location":"configure-harvest-basic/#credentials-script","title":"Credentials Script","text":"

    The credentials_script feature allows you to fetch authentication information via an external script. This can be configured in the Pollers section of your harvest.yml file, as shown in the example below.

    At runtime, Harvest will invoke the script specified in the credentials_script path section. Harvest will call the script with one or two arguments depending on how your poller is configured in the harvest.yml file. The script will be called like this: ./script $addr or ./script $addr $username.

    • The first argument $addr is the address of the cluster taken from the addr field under the Pollers section of your harvest.yml file.
    • The second argument $username is the username for the cluster taken from the username field under the Pollers section of your harvest.yml file. If your harvest.yml does not include a username, nothing will be passed.

    The script should communicate the credentials to Harvest by writing the response to its standard output (stdout). Harvest supports two output formats from the script:

    1. YAML format: If the script outputs a YAML object with username and password keys, Harvest will use both the username and password from the output. For example, if the script writes the following, Harvest will use myuser and mypassword for the poller's credentials.

      username: myuser\npassword: mypassword\n
      If only the password is provided, Harvest will use the username from the harvest.yml file, if available. If your username or password contains spaces, #, or other characters with special meaning in YAML, make sure you quote the value like so: password: \"my password with spaces\"

      If the script outputs a YAML object containing an authToken, Harvest will use this authToken when communicating with ONTAP or StorageGRID clusters. Harvest will include the authToken in the HTTP request's authorization header using the Bearer authentication scheme.

      authToken: eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJEcEVkRmgyODlaTXpYR25OekFvaWhTZ0FaUnBtVlVZSDJ3R3dXb0VIWVE0In0.eyJleHAiOjE3MjE4Mj\n

    2. Plain text format: If the script outputs plain text, Harvest will use the output as the password. The username will be taken from the harvest.yml file, if available. For example, if the script writes the following to its stdout, Harvest will use the username defined in that poller's section of the harvest.yml and mypassword for the poller's credentials.

      mypassword\n

    If the script doesn't finish within the specified timeout, Harvest will terminate the script and any spawned processes.

    Credential scripts are defined under the credentials_script section within Pollers in your harvest.yml. Below are the options for the credentials_script section:

    parameter type description default path string Absolute path to the script that takes two arguments: addr and username, in that order. schedule go duration or always Schedule for calling the authentication script. If set to always, the script is called every time a password is requested; otherwise, the previously cached value is used. 24h timeout go duration Maximum time Harvest will wait for the script to finish before terminating it and its descendants. 10s"},{"location":"configure-harvest-basic/#example","title":"Example","text":"

    Here is an example of how to configure the credentials_script in the harvest.yml file:

    Pollers:\n  ontap1:\n    datacenter: rtp\n    addr: 10.1.1.1\n    username: admin # Optional: if not provided, the script must return the username\n    collectors:\n      - Rest\n      - RestPerf\n    credentials_script:\n      path: ./get_credentials\n      schedule: 3h\n      timeout: 10s\n

    In this example, the get_credentials script should be located in the same directory as the harvest.yml file and should be executable. It should output the credentials in either YAML or plain text format. Here are three example scripts:

    get_credentials that outputs username and password in YAML format:

    #!/bin/bash\ncat << EOF\nusername: myuser\npassword: mypassword\nEOF\n

    get_credentials that outputs authToken in YAML format:

    #!/bin/bash\n# script requests an access token from the authorization server\n# authorization returns an access token to the script\n# script writes the YAML formatted authToken like so:\ncat << EOF\nauthToken: $authToken\nEOF\n

    Below are a couple of OAuth2 credential script examples for authenticating with ONTAP or StorageGRID OAuth2-enabled clusters.

    These are examples that you will need to adapt to your environment.

    Example OAuth2 script authenticating with the Keycloak auth provider via curl. Uses jq to extract the token. This script outputs the authToken in YAML format.

    #!/bin/bash\n\nresponse=$(curl --silent \"http://{KEYCLOAK_IP:PORT}/realms/{REALM_NAME}/protocol/openid-connect/token\" \\\n  --header \"Content-Type: application/x-www-form-urlencoded\" \\\n  --data-urlencode \"grant_type=password\" \\\n  --data-urlencode \"username={USERNAME}\" \\\n  --data-urlencode \"password={PASSWORD}\" \\\n  --data-urlencode \"client_id={CLIENT_ID}\" \\\n  --data-urlencode \"client_secret={CLIENT_SECRET}\")\n\naccess_token=$(echo \"$response\" | jq -r '.access_token')\n\ncat << EOF\nauthToken: $access_token\nEOF\n

    Example OAuth2 script authenticating with the Auth0 auth provider via curl. Uses jq to extract the token. This script outputs the authToken in YAML format.

    #!/bin/bash\nresponse=$(curl --silent https://{AUTH0_TENANT_URL}/oauth/token \\\n  --header 'content-type: application/json' \\\n  --data '{\"client_id\":\"{CLIENT_ID}\",\"client_secret\":\"{CLIENT_SECRET}\",\"audience\":\"{ONTAP_CLUSTER_IP}\",\"grant_type\":\"client_credentials\"}')\n\naccess_token=$(echo \"$response\" | jq -r '.access_token')\n\ncat << EOF\nauthToken: $access_token\nEOF\n

    get_credentials that outputs only the password in plain text format:

    #!/bin/bash\necho \"mypassword\"\n

    "},{"location":"configure-harvest-basic/#troubleshooting","title":"Troubleshooting","text":"
    • Make sure your script is executable
    • Ensure the user/group that executes your poller also has read and execute permissions on the script. su as the user/group that runs Harvest and make sure you can execute the script too.
    "},{"location":"configure-rest/","title":"REST","text":""},{"location":"configure-rest/#rest-collector","title":"Rest Collector","text":"

    The Rest collector uses the REST protocol to collect data from ONTAP systems.

    The RestPerf collector is an extension of this collector, therefore they share many parameters and configuration settings.

    "},{"location":"configure-rest/#target-system","title":"Target System","text":"

    The target system can be any cDOT ONTAP system. ONTAP 9.12.1 and later are supported; however, the default configuration files may not completely match all versions. See REST Strategy for more details.

    "},{"location":"configure-rest/#requirements","title":"Requirements","text":"

    No SDK or other requirements. It is recommended to create a read-only user for Harvest on the ONTAP system (see prepare monitored clusters for details).

    "},{"location":"configure-rest/#metrics","title":"Metrics","text":"

    The collector collects a dynamic set of metrics. ONTAP returns JSON documents and Harvest allows you to define templates to extract values from the JSON document via a dot notation path. You can view ONTAP's full set of REST APIs by visiting https://docs.netapp.com/us-en/ontap-automation/reference/api_reference.html#access-a-copy-of-the-ontap-rest-api-reference-documentation

    As an example, the /api/storage/aggregates endpoint lists all data aggregates in the cluster. Below is an example response from this endpoint:

    {\n  \"records\": [\n    {\n      \"uuid\": \"3e59547d-298a-4967-bd0f-8ae96cead08c\",\n      \"name\": \"umeng_aff300_aggr2\",\n      \"space\": {\n        \"block_storage\": {\n          \"size\": 8117898706944,\n          \"available\": 4889853616128\n        }\n      },\n      \"state\": \"online\",\n      \"volume_count\": 36\n    }\n  ]\n}\n

    The Rest collector will take this document, extract the records section and convert the metrics above into: name, space.block_storage.size, space.block_storage.available, state and volume_count. Metric names will be taken, as is, unless you specify a short display name. See counters for more details.
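
    To connect the JSON above with a template, a hedged counters sketch for the aggregates endpoint could look like this; the display names to the right of => are illustrative and not necessarily the shipped template.

    name:      Aggregate\nquery:     api/storage/aggregates\nobject:    aggr\n\ncounters:\n  - ^^uuid\n  - ^name                            => aggr\n  - ^state                           => state\n  - space.block_storage.size         => space_total\n  - space.block_storage.available    => space_available\n  - volume_count\n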

    "},{"location":"configure-rest/#parameters","title":"Parameters","text":"

    The parameters of the collector are distributed across three files:

    • Harvest configuration file (default: harvest.yml)
    • Rest configuration file (default: conf/rest/default.yaml)
    • Each object has its own configuration file (located in conf/rest/$version/)

    Except for addr and datacenter, all other parameters of the Rest collector can be defined in any of these three files. Parameters defined in a lower-level file override parameters in the higher-level ones. This allows you to configure each object individually, or use the same parameters for all objects.

    The full set of parameters is described below.

    "},{"location":"configure-rest/#collector-configuration-file","title":"Collector configuration file","text":"

    This configuration file contains a list of objects that should be collected and the filenames of their templates (explained in the next section).

    Additionally, this file contains the parameters that are applied as defaults to all objects. As mentioned before, any of these parameters can be defined in the Harvest or object configuration files as well.

    parameter type description default client_timeout duration (Go-syntax) how long to wait for server responses 30s jitter duration (Go-syntax), optional Each Harvest collector runs independently, which means that at startup, each collector may send its REST queries at nearly the same time. To spread out the collector startup times over a broader period, you can use jitter to randomly distribute collector startup across a specified duration. For example, a jitter of 1m starts each collector after a random delay between 0 and 60 seconds. For more details, refer to this discussion. schedule list, required how frequently to retrieve metrics from ONTAP - data duration (Go-syntax) how frequently this collector/object should retrieve metrics from ONTAP 3 minutes

    The template should define objects in the objects section. Example:

    objects:\n  Aggregate: aggr.yaml\n

    For each object, we define the filename of the object configuration file. The object configuration files are located in subdirectories matching the ONTAP version that was used to create these files. It is possible to have multiple version-subdirectories for multiple ONTAP versions. At runtime, the collector will select the object configuration file that most closely matches the version of the target ONTAP system.

    "},{"location":"configure-rest/#object-configuration-file","title":"Object configuration file","text":"

    The Object configuration file (\"subtemplate\") should contain the following parameters:

    parameter type description default name string, required display name of the collector that will collect this object query string, required REST endpoint used to issue a REST request object string, required short name of the object counters string list of counters to collect (see notes below) plugins list plugins and their parameters to run on the collected data export_options list parameters to pass to exporters (see notes below)"},{"location":"configure-rest/#template-example","title":"Template Example:","text":"
    name:                     Volume\nquery:                    api/storage/volumes\nobject:                   volume\n\ncounters:\n  - ^^name                                        => volume\n  - ^^svm.name                                    => svm\n  - ^aggregates.#.name                            => aggr\n  - ^anti_ransomware.state                        => antiRansomwareState\n  - ^state                                        => state\n  - ^style                                        => style\n  - space.available                               => size_available\n  - space.overwrite_reserve                       => overwrite_reserve_total\n  - space.overwrite_reserve_used                  => overwrite_reserve_used\n  - space.percent_used                            => size_used_percent\n  - space.physical_used                           => space_physical_used\n  - space.physical_used_percent                   => space_physical_used_percent\n  - space.size                                    => size\n  - space.used                                    => size_used\n  - hidden_fields:\n      - anti_ransomware.state\n      - space\n  - filter:\n      - name=*harvest*\n\nplugins:\n  - LabelAgent:\n      exclude_equals:\n        - style `flexgroup_constituent`\n\nexport_options:\n  instance_keys:\n    - aggr\n    - style\n    - svm\n    - volume\n  instance_labels:\n    - antiRansomwareState\n    - state\n
    "},{"location":"configure-rest/#counters","title":"Counters","text":"

    This section defines the list of counters that will be collected. These counters can be labels, numeric metrics or histograms. The exact property of each counter is fetched from ONTAP and updated periodically.

    The display name of a counter can be changed with => (e.g., space.block_storage.size => space_total).

    Counters that are stored as labels will only be exported if they are included in the export_options section.

    The counters section allows you to specify hidden_fields and filter parameters. Please find the detailed explanation below.

    "},{"location":"configure-rest/#hidden_fields","title":"Hidden_fields","text":"

    There are some fields that ONTAP will not return unless you explicitly ask for them, even when using the URL parameter fields=**. hidden_fields is how you tell ONTAP which additional fields it should include in the REST response.
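
    A short hedged sketch, reusing fields from the volume example above: the two entries under hidden_fields ask ONTAP to include data it would otherwise omit from the response.

    counters:\n  - ^anti_ransomware.state    => antiRansomwareState\n  - space.physical_used       => space_physical_used\n  - hidden_fields:\n      - anti_ransomware.state\n      - space\n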

    "},{"location":"configure-rest/#filter","title":"Filter","text":"

    The filter is used to constrain the data returned by the endpoint, allowing for more targeted data retrieval. The filtering uses ONTAP's REST record filtering. The example above asks ONTAP to only return records where a volume's name matches *harvest*.

    If you're familiar with ONTAP's REST record filtering, the example above would become name=*harvest* and appended to the final URL like so:

    https://CLUSTER_IP/api/storage/volumes?fields=*,anti_ransomware.state,space&name=*harvest*\n

    Refer to the ONTAP API specification, sections: query parameters and record filtering, for more details.

    "},{"location":"configure-rest/#export_options","title":"Export_options","text":"

    Parameters in this section tell the exporters how to handle the collected data.

    There are two different kinds of time-series that Harvest publishes: metrics and instance labels.

    • Metrics are numeric data with associated labels (key-value pairs). E.g. volume_read_ops_total{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\"} 123. The volume_read_ops_total metric is exporting three labels: cluster, node, and volume and the metric value is 123.
    • Instance labels are named after their associated config object (e.g., volume_labels, qtree_labels, etc.). There will be one instance label for each object instance, and each instance label will contain a set of associated labels (key-value pairs) that are defined in the templates instance_labels parameter. E.g. volume_labels{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\", svm=\"svm1\"} 1. The volume_labels instance label is exporting four labels: cluster, node, volume, and svm. Instance labels always export a metric value of 1.

    The export_options section allows you to define how to export these time-series.

    • instance_keys (list): display names of labels to export to both metric and instance labels. For example, if you list the svm counter under instance_keys, that key-value will be included in all time-series metrics and all instance-labels.
    • instance_labels (list): display names of labels to export with the corresponding instance label config object. For example, if you want the volume counter to be exported with the volume_labels instance label, you would list volume in the instance_labels section.
    • include_all_labels (bool): exports all labels for all time-series metrics. If there are no metrics defined in the template, this option will do nothing. This option also overrides the previous two parameters. See also collect_only_labels.
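
    Putting the options above together, here is a hedged export_options sketch with the shape of the resulting time-series shown as comments; the label values are illustrative.

    export_options:\n  instance_keys:\n    - volume\n    - svm\n  instance_labels:\n    - state\n# metric example:         volume_read_ops_total{cluster=\"cluster1\", svm=\"svm1\", volume=\"vol1\"} 123\n# instance label example: volume_labels{cluster=\"cluster1\", svm=\"svm1\", volume=\"vol1\", state=\"online\"} 1\n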
    "},{"location":"configure-rest/#endpoints","title":"Endpoints","text":"

    In Harvest REST templates, endpoints are additional queries that enhance the data collected from the main query. The main query, identified by the query parameter, is the primary REST API for data collection. For example, the main query for a disk object is api/storage/disks. Typically endpoints are used to query the private CLI to add metrics that are not available via ONTAP's public REST API.

    Within the endpoints section of a Harvest REST template, you can define multiple endpoint entries. Each entry supports its own query and associated counters, allowing you to collect additional metrics or labels from various API. These additional metrics or labels are associated with the main dataset via a key. The key is denoted by the ^^ notation in the counters of both the main query and the endpoints.

    In the example below, the endpoints section makes an additional query to api/private/cli/disk, which collects metrics such as stats_io_kbps, stats_sectors_read, and stats_sectors_written. The uuid is the key that links the data from the api/storage/disks and api/private/cli/disk API. The type label from the api/private/cli/disk endpoint is included as outlined in the export_options.

    name:             Disk\nquery:            api/storage/disks\nobject:           disk\n\ncounters:\n  - ^^uid                       => uuid\n  - ^bay                        => shelf_bay\n  - ^container_type\n  - ^home_node.name             => owner_node\n  - ^model\n  - ^name                       => disk\n  - ^node.name                  => node\n  - ^node.uuid\n  - ^outage.reason              => outage\n  - ^serial_number\n  - ^shelf.uid                  => shelf\n  - ^state\n  - bytes_per_sector            => bytes_per_sector\n  - sector_count                => sectors\n  - stats.average_latency       => stats_average_latency\n  - stats.power_on_hours        => power_on_hours\n  - usable_size\n\nendpoints:\n  - query: api/private/cli/disk\n    counters:\n      - ^^uid                   => uuid\n      - ^type\n      - disk_io_kbps_total      => stats_io_kbps\n      - sectors_read            => stats_sectors_read\n      - sectors_written         => stats_sectors_written\n\nplugins:\n  - Disk\n  - LabelAgent:\n      value_to_num:\n        - new_status outage - - `0` #ok_value is empty value, '-' would be converted to blank while processing.\n      join:\n        - index `_` node,disk\n  - MetricAgent:\n      compute_metric:\n        - uptime MULTIPLY stats.power_on_hours 60 60 #convert to second for zapi parity\n\nexport_options:\n  instance_keys:\n    - disk\n    - index\n    - node\n  instance_labels:\n    - container_type\n    - failed\n    - model\n    - outage\n    - owner_node\n    - serial_number\n    - shared\n    - shelf\n    - shelf_bay\n    - type\n
    "},{"location":"configure-rest/#restperf-collector","title":"RestPerf Collector","text":"

    RestPerf collects performance metrics from ONTAP systems using the REST protocol. The collector is designed to be easily extendable to collect new objects or to collect additional counters from already configured objects.

    This collector is an extension of the Rest collector. The major difference between them is that RestPerf collects only the performance (perf) APIs. Additionally, RestPerf always calculates final values from the deltas of two subsequent polls.

    "},{"location":"configure-rest/#metrics_1","title":"Metrics","text":"

    RestPerf metrics are calculated the same as ZapiPerf metrics. More details about how performance metrics are calculated can be found here.

    "},{"location":"configure-rest/#parameters_1","title":"Parameters","text":"

    The parameters of the collector are distributed across three files:

    • Harvest configuration file (default: harvest.yml)
    • RestPerf configuration file (default: conf/restperf/default.yaml)
    • Each object has its own configuration file (located in conf/restperf/$version/)

    Except for addr, datacenter and auth_style, all other parameters of the RestPerf collector can be defined in any of these three files. Parameters defined in a lower-level file override parameters in the higher-level file. This allows the user to configure each object individually, or use the same parameters for all objects.

    The full set of parameters is described below.

    "},{"location":"configure-rest/#restperf-configuration-file","title":"RestPerf configuration file","text":"

    This configuration file (the \"template\") contains a list of objects that should be collected and the filenames of their configuration (explained in the next section).

    Additionally, this file contains the parameters that are applied as defaults to all objects. (As mentioned before, any of these parameters can be defined in the Harvest or object configuration files as well).

    parameter type description default use_insecure_tls bool, optional skip verifying TLS certificate of the target system false client_timeout duration (Go-syntax) how long to wait for server responses 30s latency_io_reqd int, optional threshold of IOPs for calculating latency metrics (latencies based on very few IOPs are unreliable) 10 jitter duration (Go-syntax), optional Each Harvest collector runs independently, which means that at startup, each collector may send its REST queries at nearly the same time. To spread out the collector startup times over a broader period, you can use jitter to randomly distribute collector startup across a specified duration. For example, a jitter of 1m starts each collector after a random delay between 0 and 60 seconds. For more details, refer to this discussion. schedule list, required the poll frequencies of the collector/object, should include exactly these three elements in the exact same order: - counter duration (Go-syntax) poll frequency of updating the counter metadata cache 20 minutes - instance duration (Go-syntax) poll frequency of updating the instance cache 10 minutes - data duration (Go-syntax) poll frequency of updating the data cache Note Harvest allows defining poll intervals on sub-second level (e.g. 1ms), however keep in mind the following:
    • API response of an ONTAP system can take several seconds, so the collector is likely to enter failed state if the poll interval is less than client_timeout.
    • Small poll intervals will create significant workload on the ONTAP system, as many counters are aggregated on-demand.
    • Some metric values become less significant if they are calculated for very short intervals (e.g. latencies)
    1 minute

    The template should define objects in the objects section. Example:

    objects:\n  SystemNode: system_node.yaml\n  HostAdapter: hostadapter.yaml\n

    Note that for each object we only define the filename of the object configuration file. The object configuration files are located in subdirectories matching the ONTAP version that was used to create these files. It is possible to have multiple version-subdirectories for multiple ONTAP versions. At runtime, the collector will select the object configuration file that most closely matches the version of the target ONTAP system. (A mismatch is tolerated since RestPerf will fetch and validate counter metadata from the system.)

    "},{"location":"configure-rest/#object-configuration-file_1","title":"Object configuration file","text":"

    Refer Object configuration file

    "},{"location":"configure-rest/#counters_1","title":"Counters","text":"

    See Counters

    Some counters require a \"base-counter\" for post-processing. If the base-counter is missing, RestPerf will still run, but the missing data won't be exported.

    "},{"location":"configure-rest/#export_options_1","title":"Export_options","text":"

    See Export Options

    "},{"location":"configure-rest/#ontap-private-cli","title":"ONTAP Private CLI","text":"

    The ONTAP private CLI allows for more granular control and access to non-public counters. It can be used to fill gaps in the REST API, especially in cases where certain data is not yet available through the REST API. Harvest's REST collector can make full use of ONTAP's private CLI. This means when ONTAP's public REST API is missing counters, Harvest can still collect them as long as those counters are available via ONTAP's CLI.

    For more information on using the ONTAP private CLI with the REST API, you can refer to the following resources:

    • NetApp Documentation: Accessing ONTAP CLI through REST APIs
    • NetApp Blog: Private CLI Passthrough with ONTAP REST API
    "},{"location":"configure-rest/#creating-templates-that-use-ontaps-private-cli","title":"Creating Templates That Use ONTAP's Private CLI","text":"

    Let's take an example of how we can make Harvest use the system fru-check show CLI command.

    system fru-check show\n

    REST APIs endpoint:

    /api/private/cli/system/fru-check?fields=node,fru_name,fru_status\n

    Converting the CLI command system fru-check show for use with a private CLI REST API can be achieved by adhering to the path rules outlined in the ONTAP documentation. Generally, this involves substituting all spaces within the CLI command with a forward slash (/), and converting the ONTAP CLI verb into the corresponding REST verb.

    The show command is converted to an HTTP GET call. From the CLI, look at the required field names and pass them as a comma-separated list in fields= in the API endpoint.

    Note: If the field name contains a hyphen (-), it should be converted to an underscore (_) in the REST API field. For example, fru-name becomes fru_name. ONTAP is flexible with the input format and can freely convert between hyphen (-) and underscore (_) forms. However, when it comes to output, ONTAP returns field names with underscores. For compatibility and consistency, it is mandatory to use underscores in field names when working with Harvest REST templates for ONTAP private CLI.

    "},{"location":"configure-rest/#advanced-and-diagnostic-mode-commands","title":"Advanced and Diagnostic Mode Commands","text":"

    The CLI passthrough allows you to execute advanced and diagnostic mode CLI commands by including the privilege_level field in your request under the filter setting, like so:

    counters:\n  - filter:\n      - privilege_level=diagnostic\n

    "},{"location":"configure-rest/#creating-a-harvest-template-for-private-cli","title":"Creating a Harvest Template for Private CLI","text":"

    Here's a Harvest template that uses ONTAP's private CLI to collect field-replaceable units (FRU) counters by using ONTAP's CLI command system fru-check show

    name:                         FruCheck\nquery:                        api/private/cli/system/fru-check\nobject:                       fru_check\n\ncounters:\n  - ^^node\n  - ^^serial_number              => serial_number\n  - ^fru_name                    => name\n  - ^fru_status                  => status\n\nexport_options:\n  instance_keys:\n    - node\n    - serial_number\n  instance_labels:\n    - name\n    - status\n

    In this template, the query field specifies the private CLI command to be used (system fru-check show). The counters field maps the output of the private CLI command to the fields of the fru_check object. To identify the ONTAP counter names (the left side of the '=>' symbol in the template, such as fru_name), you can establish an SSH connection to your ONTAP cluster. Once connected, leverage ONTAP's command completion functionality to reveal the counter names. For instance, you can type system fru-check show -fields, then press the '?' key. This will display a list of ONTAP field names, as demonstrated below.

    cluster-01::> system fru-check show -fields ?\n  node                        Node\n  serial-number               FRU Serial Number\n  fru-name                    FRU Name\n  fru-type                    FRU Type\n  fru-status                  Status\n  display-name                Display Name\n  location                    Location\n  additional-info             Additional Info\n  reason                      Details\n

    The export_options field specifies how the data should be exported. The instance_keys field lists the fields that will be added as labels to all exported instances of the fru_check object. The instance_labels field lists the fields that should be included as labels in the exported data.

    The output of this template would look like:

    fru_check_labels{cluster=\"umeng-aff300-01-02\",datacenter=\"u2\",name=\"DIMM-1\",node=\"umeng-aff300-02\",serial_number=\"s2\",status=\"pass\"} 1.0\nfru_check_labels{cluster=\"umeng-aff300-01-02\",datacenter=\"u2\",name=\"PCIe Devices\",node=\"umeng-aff300-02\",serial_number=\"s1\",status=\"pass\"} 1.0\n
    "},{"location":"configure-rest/#partial-aggregation","title":"Partial Aggregation","text":"

    There are instances when ONTAP may report partial aggregate results for certain objects (for example, during a node outage). In such cases, the RestPerf Collector will skip the reporting of performance counters for the affected objects.

    To determine whether partial aggregation affects an object, check the numPartials entry in the Harvest logs. If numPartials is greater than zero, it indicates that partial aggregations have occurred for that object, e.g., Collected Poller=aff-251 collector=RestPerf:NFSv4 instances=56 numPartials=15

    "},{"location":"configure-storagegrid/","title":"StorageGRID","text":""},{"location":"configure-storagegrid/#storagegrid-collector","title":"StorageGRID Collector","text":"

    The StorageGRID collector uses REST calls to collect data from StorageGRID systems.

    "},{"location":"configure-storagegrid/#target-system","title":"Target System","text":"

    All StorageGRID versions are supported; however, the default configuration files may not completely match older systems.

    "},{"location":"configure-storagegrid/#requirements","title":"Requirements","text":"

    No SDK or other requirements. It is recommended to create a read-only user for Harvest on the StorageGRID system (see prepare monitored clusters for details).

    "},{"location":"configure-storagegrid/#metrics","title":"Metrics","text":"

    The collector collects a dynamic set of metrics via StorageGRID's REST API. StorageGRID returns JSON documents and Harvest allows you to define templates to extract values from the JSON document via a dot notation path. You can view StorageGRID's full set of REST APIs by visiting https://$STORAGE_GRID_HOSTNAME/grid/apidocs.html

    As an example, the /grid/accounts-cache endpoint lists the tenant accounts in the cache and includes additional information, such as objectCount and dataBytes. Below is an example response from this endpoint:

    {\n  \"data\": [\n    {\n      \"id\": \"95245224059574669217\",\n      \"name\": \"foople\",\n      \"policy\": {\n        \"quotaObjectBytes\": 50000000000\n      },\n      \"objectCount\": 6,\n      \"dataBytes\": 10473454261\n    }\n  ]\n}\n

    The StorageGRID collector will take this document, extract the data section, and convert the metrics above into: name, policy.quotaObjectBytes, objectCount, and dataBytes. Metric names will be taken as-is unless you specify a short display name. See counters for more details.
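
    For example, a template counters section that extracts those values with dot notation might look like the sketch below (the query path, object name, and display names are illustrative, not the shipped tenant.yaml):

    name: Tenant\nquery: grid/accounts-cache\nobject: tenant\n\ncounters:\n  - ^^id\n  - ^name => tenant\n  - policy.quotaObjectBytes => quota_bytes\n  - objectCount => objects\n  - dataBytes => used_bytes\n\nexport_options:\n  instance_keys:\n    - id\n    - tenant\n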

    "},{"location":"configure-storagegrid/#parameters","title":"Parameters","text":"

    The parameters of the collector are distributed across three files:

    • Harvest configuration file (default: harvest.yml)
    • StorageGRID configuration file (default: conf/storagegrid/default.yaml)
    • Each object has its own configuration file (located in conf/storagegrid/$version/)

    Except for addr and datacenter, all other parameters of the StorageGRID collector can be defined in any of these three files. Parameters defined in a lower-level file override parameters in the higher-level ones. This allows you to configure each object individually, or use the same parameters for all objects.

    The full set of parameters is described below.

    "},{"location":"configure-storagegrid/#harvest-configuration-file","title":"Harvest configuration file","text":"

    Parameters in the poller section should define the following required parameters.
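
    For example, a poller entry for this collector might look like the sketch below (names, address, and credentials are illustrative); the required parameters are listed in the table that follows:

    Pollers:\n  storagegrid-01:\n    datacenter: dc-1\n    addr: 10.0.0.10\n    username: harvest\n    password: change_me\n    collectors:\n      - StorageGrid\n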

    parameter type description default Poller name (header) string, required Poller name, user-defined value addr string, required IPv4, IPv6 or FQDN of the target system datacenter string, required Datacenter name, user-defined value username, password string, required StorageGRID username and password with at least Tenant accounts permissions collectors list, required Name of collector to run for this poller, use StorageGrid for this collector"},{"location":"configure-storagegrid/#storagegrid-configuration-file","title":"StorageGRID configuration file","text":"

    This configuration file contains a list of objects that should be collected and the filenames of their templates ( explained in the next section).

    Additionally, this file contains the parameters that are applied as defaults to all objects. As mentioned before, any of these parameters can be defined in the Harvest or object configuration files as well.

    parameter type description default client_timeout duration (Go-syntax) how long to wait for server responses 30s schedule list, required how frequently to retrieve metrics from StorageGRID - data duration (Go-syntax) how frequently this collector/object should retrieve metrics from StorageGRID 5 minutes only_cluster_instance bool, optional don't require instance key. assume the only instance is the cluster itself

    The template should define objects in the objects section. Example:

    objects:\n  Tenant: tenant.yaml\n

    For each object, we define the filename of the object configuration file. The object configuration files are located in subdirectories matching the StorageGRID version that was used to create these files. It is possible to have multiple version-subdirectories for multiple StorageGRID versions. At runtime, the collector will select the object configuration file that most closely matches the version of the target StorageGRID system.

    "},{"location":"configure-storagegrid/#object-configuration-file","title":"Object configuration file","text":"

    The Object configuration file (\"subtemplate\") should contain the following parameters:

    parameter type description default name string, required display name of the collector that will collect this object query string, required REST endpoint used to issue a REST request object string, required short name of the object api string StorageGRID REST endpoint version to use, overrides default management API version 3 counters list list of counters to collect (see notes below) plugins list plugins and their parameters to run on the collected data export_options list parameters to pass to exporters (see notes below)"},{"location":"configure-storagegrid/#counters","title":"Counters","text":"

    This section defines the list of counters that will be collected. These counters can be labels, numeric metrics or histograms. The exact property of each counter is fetched from StorageGRID and updated periodically.

    The display name of a counter can be changed with => (e.g., policy.quotaObjectBytes => logical_quota).

    Counters that are stored as labels will only be exported if they are included in the export_options section.

    "},{"location":"configure-storagegrid/#export_options","title":"Export_options","text":"

    Parameters in this section tell the exporters how to handle the collected data.

    There are two different kinds of time-series that Harvest publishes: metrics and instance labels.

    • Metrics are numeric data with associated labels (key-value pairs). E.g. volume_read_ops_total{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\"} 123. The volume_read_ops_total metric is exporting three labels: cluster, node, and volume, and the metric value is 123.
    • Instance labels are named after their associated config object (e.g., volume_labels, qtree_labels, etc.). There will be one instance label for each object instance, and each instance label will contain a set of associated labels (key-value pairs) that are defined in the template's instance_labels parameter. E.g. volume_labels{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\", svm=\"svm1\"} 1. The volume_labels instance label is exporting four labels: cluster, node, volume, and svm. Instance labels always export a metric value of 1.

    The export_options section allows you to define how to export these time-series.

    • instance_keys (list): display names of labels to export to both metric and instance labels. For example, if you list the svm counter under instance_keys, that key-value will be included in all time-series metrics and all instance-labels.
    • instance_labels (list): display names of labels to export with the corresponding instance label config object. For example, if you want the volume counter to be exported with the volume_labels instance label, you would list volume in the instance_labels section.
    • include_all_labels (bool): exports all labels for all time-series metrics. If there are no metrics defined in the template, this option will do nothing. This option also overrides the previous two parameters. See also collect_only_labels.
    "},{"location":"configure-templates/","title":"Templates","text":""},{"location":"configure-templates/#creatingediting-templates","title":"Creating/editing templates","text":"

    This document covers how to use Collector and Object templates to extend Harvest.

    1. How to add a new object template
    2. How to extend an existing object template

    There are a couple of ways to learn about ZAPIs and their attributes:

    • ONTAP's documentation
    • Using Harvest's zapi tool to explore available APIs and metrics on your cluster. Examples:
    $ harvest zapi --poller <poller> show apis\n  # will print list of apis that are available\n  # usually apis with the \"get-iter\" suffix can provide useful metrics\n$ harvest zapi --poller <poller> show attrs --api volume-get-iter\n  # will print the attribute tree of the API\n$ harvest zapi --poller <poller> show data --api volume-get-iter\n  # will print raw data of the API attribute tree\n

    (Replace <poller> with the name of a poller that can connect to an ONTAP system.)

    "},{"location":"configure-templates/#conf-path","title":"Conf Path","text":"

    The conf path is the colon-separated list of directories that Harvest searches to load templates. Harvest walks each directory in order, stopping at the first one that contains the desired template. The default value of confpath is conf, meaning that only the conf directory is searched for templates.

    There are two ways to change the conf path.

    • You can specify the -confpath command line argument to bin/harvest or bin/poller, e.g. -confpath customconf:conf. Harvest will search the customconf directory followed by the conf directory.

    • You can specify the conf_path parameter in the Pollers section of your harvest.yml file, e.g.

    Pollers:\n  netapp-cluster1: \n    datacenter: dc-1\n    addr: 10.193.48.163\n    conf_path: customconf:/etc/harvest/conf:conf\n

    This conf_path example will search for templates in this order, stopping at the first one that contains the template.

    1. local directory customconf
    2. absolute directory /etc/harvest/conf
    3. local directory conf

    Use the conf path to isolate your edits and extensions to Harvest's builtin templates. This ensures that your customizations won't be affected when you upgrade Harvest.

    When using a custom confpath, make sure your custom directories have the same structure as the default conf directory. In the example below, four template modifications have been set up in the /etc/harvest/customconf directory.

    The poller's conf_path parameter is set to /etc/harvest/customconf:conf to use these modified templates. Harvest will use the custom templates when they match and the default templates otherwise.

    See issue #2330 for more examples.

    # tree /etc/harvest/customconf\n\n/etc/harvest/customconf\n\u251c\u2500\u2500 rest\n\u2502   \u251c\u2500\u2500 9.12.0\n\u2502   \u2502 \u251c\u2500\u2500 aggr.yaml\n\u2502   \u2502 \u2514\u2500\u2500 volume.yaml\n\u251c\u2500\u2500 restperf\n\u2502   \u251c\u2500\u2500 9.13.0\n\u2502   \u2502 \u2514\u2500\u2500 qtree.yaml\n\u251c\u2500\u2500 zapi\n\u2514\u2500\u2500 zapiperf\n    \u251c\u2500\u2500 cdot\n    \u2502 \u2514\u2500\u2500 9.8.0\n    \u2502     \u2514\u2500\u2500 qtree.yaml\n
    "},{"location":"configure-templates/#collector-templates","title":"Collector templates","text":"

    Collector templates define which set of objects Harvest should collect from the system being monitored. In your harvest.yml configuration file, when you say that you want to use a Zapi collector, that collector will read the matching conf/zapi/default.yaml - same with ZapiPerf, it will read the conf/zapiperf/default.yaml file. Below is a snippet from conf/zapi/default.yaml. Each object is mapped to a corresponding object template file. For example, the Node object searches for the most appropriate version of the node.yaml file in the conf/zapi/cdot/** directory.

    collector:          Zapi\nobjects:\n  Node:             node.yaml\n  Aggregate:        aggr.yaml\n  Volume:           volume.yaml\n  Disk:             disk.yaml\n

    Each collector will also check if a matching file named custom.yaml exists, and if it does, it will read that file and merge it with default.yaml. The custom.yaml file should be located beside the matching default.yaml file (e.g., conf/zapi/custom.yaml is beside conf/zapi/default.yaml).

    Let's take a look at some examples.

    1. Define a poller that uses the default Zapi collector. Using the default template is the easiest and most used option.
    Pollers:\n  jamaica:\n    datacenter: munich\n    addr: 10.10.10.10\n    collectors:\n      - Zapi # will use conf/zapi/default.yaml and optionally merge with conf/zapi/custom.yaml\n
    2. Define a poller that uses the ZapiPerf collector, but with a custom template file:
    Pollers:\n  jamaica:\n    datacenter: munich\n    addr: 10.10.10.10\n    collectors:\n      - ZapiPerf:\n          - limited.yaml # will use conf/zapiperf/limited.yaml\n        # more templates can be added, they will be merged\n
    "},{"location":"configure-templates/#object-templates","title":"Object Templates","text":"

    Object templates (example: conf/zapi/cdot/9.8.0/lun.yaml) describe what to collect and export. These templates are used by collectors to gather metrics and send them to your time-series db.

    Object templates are made up of the following parts:

    1. the name of the object (or resource) to collect
    2. the ZAPI or REST query used to collect the object
    3. a list of object counters to collect and how to export them

    Instead of editing one of the existing templates, it's better to extend one of them. That way, your custom template will not be overwritten when upgrading Harvest. For example, if you want to extend conf/zapi/cdot/9.8.0/aggr.yaml, first create a copy (e.g., conf/zapi/cdot/9.8.0/custom_aggr.yaml), and then tell Harvest to use your custom template by adding these lines to conf/zapi/custom.yaml:

    objects:\n  Aggregate: custom_aggr.yaml\n

    After restarting your pollers, aggr.yaml and custom_aggr.yaml will be merged.

    "},{"location":"configure-templates/#create-a-new-object-template","title":"Create a new object template","text":"

    In this example, imagine that Harvest doesn't already collect environment sensor data, and you want to collect it. Sensor data comes from the environment-sensors-get-iter ZAPI. Here are the steps to add a new object template.

    Create the file conf/zapi/cdot/9.8.0/sensor.yaml (optionally replace 9.8.0 with the earliest version of ONTAP that supports sensor data; refer to Harvest Versioned Templates for more information). Add the following content to your new sensor.yaml file.

    name: Sensor                      # this name must match the key in your custom.yaml file\nquery: environment-sensors-get-iter\nobject: sensor\n\nmetric_type: int64\n\ncounters:\n  environment-sensors-info:\n    - critical-high-threshold    => critical_high\n    - critical-low-threshold     => critical_low\n    - ^discrete-sensor-state     => discrete_state\n    - ^discrete-sensor-value     => discrete_value\n    - ^^node-name                => node\n    - ^^sensor-name              => sensor\n    - ^sensor-type               => type\n    - ^threshold-sensor-state    => threshold_state\n    - threshold-sensor-value     => threshold_value\n    - ^value-units               => unit\n    - ^warning-high-threshold    => warning_high\n    - ^warning-low-threshold     => warning_low\n\nexport_options:\n  include_all_labels: true\n
    "},{"location":"configure-templates/#enable-the-new-object-template","title":"Enable the new object template","text":"

    To enable the new sensor object template, create the conf/zapi/custom.yaml file with the lines shown below.

    objects:\n  Sensor: sensor.yaml                 # this key must match the name in your sensor.yaml file\n

    The Sensor key used in the custom.yaml must match the name defined in the sensor.yaml file. That mapping is what connects this object with its template. In the future, if you add more object templates, you can add those in your existing custom.yaml file.

    "},{"location":"configure-templates/#test-your-object-template-changes","title":"Test your object template changes","text":"

    Test your new Sensor template with a single poller like this:

    ./bin/harvest start <poller> --foreground --verbose --collectors Zapi --objects Sensor\n

    Replace <poller> with the name of one of your ONTAP pollers.

    Once you have confirmed that the new template works, restart any already running pollers that you want to use the new template(s).
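
    For example, assuming the standard Harvest CLI, a single poller can be restarted with:

    bin/harvest restart <poller>\n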

    "},{"location":"configure-templates/#check-the-metrics","title":"Check the metrics","text":"

    If you are using the Prometheus exporter, you can scrape the poller's HTTP endpoint with curl or a web browser. E.g., my poller exports its data on port 15001. Adjust as needed for your exporter.

    curl -s 'http://localhost:15001/metrics' | grep ^sensor_  # sensor_ name matches the object: value in your sensor.yaml file.\n\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",critical_high=\"3664\",node=\"shopfloor-02\",sensor=\"P3.3V STBY\",type=\"voltage\",warning_low=\"3040\",critical_low=\"2960\",threshold_state=\"normal\",unit=\"mV\",warning_high=\"3568\"} 3280\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",sensor=\"P1.2V STBY\",type=\"voltage\",threshold_state=\"normal\",warning_high=\"1299\",warning_low=\"1105\",critical_low=\"1086\",node=\"shopfloor-02\",critical_high=\"1319\",unit=\"mV\"} 1193\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",unit=\"mV\",critical_high=\"15810\",critical_low=\"0\",node=\"shopfloor-02\",sensor=\"P12V STBY\",type=\"voltage\",threshold_state=\"normal\"} 11842\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",sensor=\"P12V STBY Curr\",type=\"current\",threshold_state=\"normal\",unit=\"mA\",critical_high=\"3182\",critical_low=\"0\",node=\"shopfloor-02\"} 748\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",critical_low=\"1470\",node=\"shopfloor-02\",sensor=\"Sysfan2 F2 Speed\",type=\"fan\",threshold_state=\"normal\",unit=\"RPM\",warning_low=\"1560\"} 2820\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",sensor=\"PSU2 Fan1 Speed\",type=\"fan\",threshold_state=\"normal\",unit=\"RPM\",warning_low=\"4600\",critical_low=\"4500\",node=\"shopfloor-01\"} 6900\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",sensor=\"PSU1 InPwr Monitor\",type=\"unknown\",threshold_state=\"normal\",unit=\"mW\",node=\"shopfloor-01\"} 132000\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",critical_high=\"58\",type=\"thermal\",unit=\"C\",warning_high=\"53\",critical_low=\"0\",node=\"shopfloor-01\",sensor=\"Bat Temp\",threshold_state=\"normal\",warning_low=\"5\"} 24\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",critical_high=\"9000\",node=\"shopfloor-01\",sensor=\"Bat Charge Volt\",type=\"voltage\",threshold_state=\"normal\",unit=\"mV\",warning_high=\"8900\"} 8200\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",node=\"shopfloor-02\",sensor=\"PSU1 InPwr Monitor\",type=\"unknown\",threshold_state=\"normal\",unit=\"mW\"} 132000\n
    "},{"location":"configure-templates/#extend-an-existing-object-template","title":"Extend an existing object template","text":""},{"location":"configure-templates/#how-to-extend-a-restrestperfstoragegridems-collectors-existing-object-template","title":"How to extend a Rest/RestPerf/StorageGRID/Ems collector's existing object template","text":"

    Instead of editing one of the existing templates, it's better to copy one and edit the copy. That way, your custom template will not be overwritten when upgrading Harvest. For example, if you want to change conf/rest/9.12.0/aggr.yaml, first create a copy (e.g., conf/rest/9.12.0/custom_aggr.yaml), then add these lines to conf/rest/custom.yaml:

    objects:\n  Aggregate: custom_aggr.yaml\n

    After restarting pollers, aggr.yaml will be ignored and the new, custom_aggr.yaml subtemplate will be used instead.

    "},{"location":"configure-templates/#how-to-extend-a-zapizapiperf-collectors-existing-object-template","title":"How to extend a Zapi/ZapiPerf collector's existing object template","text":"

    In this example, we want to extend one of the existing object templates that Harvest ships with, e.g. conf/zapi/cdot/9.8.0/lun.yaml and collect additional information as outlined below.

    Let's say you want to extend lun.yaml to:

    1. Increase client_timeout (You want to increase the default timeout of the lun ZAPI because it keeps timing out)
    2. Add additional counters, e.g. multiprotocol-type, application
    3. Add a new counter to the already collected lun metrics using the value_to_num plugin
    4. Add a new application instance key and label to the collected metrics

    Let's assume the existing template is located at conf/zapi/cdot/9.8.0/lun.yaml and contains the following.

    name: Lun\nquery: lun-get-iter\nobject: lun\n\ncounters:\n  lun-info:\n    - ^node\n    - ^path\n    - ^qtree\n    - size\n    - size-used\n    - ^state\n    - ^^uuid\n    - ^volume\n    - ^vserver => svm\n\nplugins:\n  - LabelAgent:\n    # metric label zapi_value rest_value `default_value`\n    value_to_num:\n      - new_status state online online `0`\n    split:\n      - path `/` ,,,lun\n\nexport_options:\n  instance_keys:\n    - node\n    - qtree\n    - lun\n    - volume\n    - svm\n  instance_labels:\n    - state\n

    To extend the out-of-the-box lun.yaml template, create a conf/zapi/custom.yaml file if it doesn't already exist and add the lines shown below:

    objects:\n  Lun: custom_lun.yaml\n

    Create a new object template conf/zapi/cdot/9.8.0/custom_lun.yaml with the lines shown below.

    client_timeout: 5m\ncounters:\n  lun-info:\n    - ^multiprotocol-type\n    - ^application\n\nplugins:\n  - LabelAgent:\n    value_to_num:\n      - custom_status state online online `0`\n\nexport_options:\n  instance_keys:\n    - application\n

    When you restart your pollers, Harvest will take the out-of-the-box template (lun.yaml) and your new one (custom_lun.yaml) and merge them into the following:

    name: Lun\nquery: lun-get-iter\nobject: lun\ncounters:\n  lun-info:\n    - ^node\n    - ^path\n    - ^qtree\n    - size\n    - size-used\n    - ^state\n    - ^^uuid\n    - ^volume\n    - ^vserver => svm\n    - ^multiprotocol-type\n    - ^application\nplugins:\n  LabelAgent:\n    value_to_num:\n      - new_status state online online `0`\n      - custom_status state online online `0`\n    split:\n      - path `/` ,,,lun\nexport_options:\n  instance_keys:\n    - node\n    - qtree\n    - lun\n    - volume\n    - svm\n    - application\nclient_timeout: 5m\n

    To help understand the merging process and the resulting combined template, you can view the result with:

    bin/harvest doctor merge --template conf/zapi/cdot/9.8.0/lun.yaml --with conf/zapi/cdot/9.8.0/custom_lun.yaml\n
    "},{"location":"configure-templates/#replace-an-existing-object-template-for-zapizapiperf-collector","title":"Replace an existing object template for Zapi/ZapiPerf Collector","text":"

    You can only extend existing templates for Zapi/ZapiPerf Collector as explained above. If you need to replace one of the existing object templates, let us know on Discord or GitHub.

    "},{"location":"configure-templates/#harvest-versioned-templates","title":"Harvest Versioned Templates","text":"

    Harvest ships with a set of versioned templates tailored for specific versions of ONTAP. At runtime, Harvest uses a BestFit heuristic to pick the most appropriate template. The BestFit heuristic compares the list of Harvest templates with the ONTAP version and selects the best match. There are versioned templates for both the ZAPI and REST collectors. Below is an example of how the BestFit algorithm works - assume Harvest has these template versions:

    • 9.6.0
    • 9.6.1
    • 9.8.0
    • 9.9.0
    • 9.10.1

    If you are monitoring a cluster at one of these versions, Harvest will select the indicated template:

    • ONTAP version 9.4.1, Harvest will select the templates for 9.6.0
    • ONTAP version 9.6.0, Harvest will select the templates for 9.6.0
    • ONTAP version 9.7.X, Harvest will select the templates for 9.6.1
    • ONTAP version 9.12, Harvest will select the templates for 9.10.1
    "},{"location":"configure-templates/#counters","title":"counters","text":"

    This section contains the complete or partial attribute tree of the queried API. Since the collector does not get counter metadata from the ONTAP system, two additional symbols are used for non-numeric attributes:

    • ^ used as a prefix indicates that the attribute should be stored as a label
    • ^^ indicates that the attribute is a label and an instance key (i.e., a label that uniquely identifies an instance, such as name, uuid). If a single label does not uniquely identify an instance, then multiple instance keys should be indicated.

    Additionally, the symbol => can be used to set a custom display name for both instance labels and numeric counters. Example:

    name: Spare\nquery: aggr-spare-get-iter\nobject: spare\ncollect_only_labels: true\ncounters:\n  aggr-spare-disk-info:\n    - ^^disk                                # creates label aggr-disk\n    - ^disk-type                            # creates label aggr-disk-type\n    - ^is-disk-zeroed   => is_disk_zeroed   # creates label is_disk_zeroed\n    - ^^original-owner  => original_owner   # creates label original_owner\nexport_options:\n  instance_keys:\n    - disk\n    - original_owner\n  instance_labels:\n    - disk_type\n    - is_disk_zeroed\n

    Harvest does its best to determine a unique display name for each template's label and metric. Instead of relying on this heuristic, it is better to be explicit in your templates and define a display name using the => mapping. For example, instead of this:

    aggr-spare-disk-info:\n    - ^^disk\n    - ^disk-type\n

    do this:

    aggr-spare-disk-info:\n    - ^^disk      => disk\n    - ^disk-type  => disk_type\n

    See also #585

    "},{"location":"configure-unix/","title":"Unix","text":"

    This collector polls resource usage by Harvest pollers on the local system. The collector might be extended in the future to monitor any local or remote process.
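
    A sketch of a poller entry that runs this collector against the local machine (the poller and exporter names are illustrative and assume a matching exporter is defined elsewhere in harvest.yml):

    Pollers:\n  unix:\n    datacenter: local\n    addr: localhost\n    collectors:\n      - Unix\n    exporters:\n      - prometheus\n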

    "},{"location":"configure-unix/#target-system","title":"Target System","text":"

    The machine where Harvest is running (\"localhost\").

    "},{"location":"configure-unix/#requirements","title":"Requirements","text":"

    The collector requires an OS where the proc-filesystem is available. If you are a developer, you are welcome to add support for other platforms. Currently, supported platforms include most Unix/Unix-like systems:

    • Android / Termux
    • DragonFly BSD
    • FreeBSD
    • IBM AIX
    • Linux
    • NetBSD
    • Plan9
    • Solaris

    (On FreeBSD and NetBSD the proc-filesystem needs to be manually mounted).

    "},{"location":"configure-unix/#parameters","title":"Parameters","text":"parameter type description default mount_point string, optional path to the proc filesystem `/proc"},{"location":"configure-unix/#metrics","title":"Metrics","text":"

    The Collector follows the Linux proc(5) manual to parse a static set of metrics. Unless otherwise stated, the metric has a scalar value:

    metric type unit description start_time counter, float64 seconds process uptime cpu_percent gauge, float64 percent CPU used since last poll memory_percent gauge, float64 percent Memory used (RSS) since last poll cpu histogram, float64 seconds CPU used since last poll (system, user, iowait) memory histogram, uint64 kB Memory used since last poll (rss, vms, swap, etc) io histogram, uint64 bytecount IOs performed by process:rchar, wchar, read_bytes, write_bytes - read/write IOssyscr, syscw - syscalls for IO operations net histogram, uint64 count/byte Different IO operations over network devices ctx histogram, uint64 count Number of context switched (voluntary, involuntary) threads counter, uint64 count Number of threads fds counter, uint64 count Number of file descriptors

    Additionally, the collector provides the following instance labels:

    label description poller name of the poller pid PID of the poller"},{"location":"configure-unix/#issues","title":"Issues","text":"
    • Collector will fail on WSL because some non-critical files, in the proc-filesystem, are not present.
    "},{"location":"configure-zapi/","title":"ZAPI","text":"

    What about REST?

    ZAPI will reach end of availability in ONTAP 9.13.1 released Q2 2023. Don't worry, Harvest has you covered. Switch to Harvest's REST collectors and collect identical metrics. See REST Strategy for more details.

    "},{"location":"configure-zapi/#zapi-collector","title":"Zapi Collector","text":"

    The Zapi collectors use the ZAPI protocol to collect data from ONTAP systems. The collector submits data as received from the target system, and does not perform any calculations or post-processing. Since the attributes of most APIs have an irregular tree structure, sometimes a plugin will be required to collect all metrics from an API.

    The ZapiPerf collector is an extension of this collector, therefore, they share many parameters and configuration settings.

    "},{"location":"configure-zapi/#target-system","title":"Target System","text":"

    The target system can be any cDot or 7Mode ONTAP system. Any version is supported; however, the default configuration files may not completely match older systems.

    "},{"location":"configure-zapi/#requirements","title":"Requirements","text":"

    No SDK or other requirements. It is recommended to create a read-only user for Harvest on the ONTAP system (see prepare monitored clusters for details).

    "},{"location":"configure-zapi/#metrics","title":"Metrics","text":"

    The collector collects a dynamic set of metrics. Since most ZAPIs have a tree structure, the collector converts that structure into a flat metric representation. No post-processing or calculation is performed on the collected data itself.

    As an example, the aggr-get-iter ZAPI provides the following partial attribute tree:

    aggr-attributes:\n  - aggr-raid-attributes:\n      - disk-count\n  - aggr-snapshot-attributes:\n      - files-total\n

    The Zapi collector will convert this tree into two \"flat\" metrics: aggr_raid_disk_count and aggr_snapshot_files_total. (The algorithm to generate a name for the metrics will attempt to keep it as simple as possible, but sometimes it's useful to manually set a short display name. See counters for more details.)
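
    In the exported data, those two flattened metrics would surface roughly as follows (labels and values are illustrative):

    aggr_raid_disk_count{datacenter=\"dc-1\",cluster=\"cluster1\",node=\"node1\",aggr=\"aggr1\"} 24\naggr_snapshot_files_total{datacenter=\"dc-1\",cluster=\"cluster1\",node=\"node1\",aggr=\"aggr1\"} 31\n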

    "},{"location":"configure-zapi/#parameters","title":"Parameters","text":"

    The parameters of the collector are distributed across three files:

    • Harvest configuration file (default: harvest.yml)
    • ZAPI configuration file (default: conf/zapi/default.yaml)
    • Each object has its own configuration file (located in conf/zapi/$version/)

    Except for addr and datacenter, all other parameters of the ZAPI collector can be defined in any of these three files. Parameters defined in a lower-level file override parameters in the higher-level ones. This allows you to configure each object individually, or use the same parameters for all objects.

    The full set of parameters is described below.

    "},{"location":"configure-zapi/#collector-configuration-file","title":"Collector configuration file","text":"

    The parameters are similar to those of the ZapiPerf collector. Parameters different from ZapiPerf:

    parameter type description default jitter duration (Go-syntax), optional Each Harvest collector runs independently, which means that at startup, each collector may send its ZAPI queries at nearly the same time. To spread out the collector startup times over a broader period, you can use jitter to randomly distribute collector startup across a specified duration. For example, a jitter of 1m starts each collector after a random delay between 0 and 60 seconds. For more details, refer to this discussion. schedule required same as for ZapiPerf, but only two elements: instance and data (collector does not run a counter poll) no_max_records bool, optional don't add max-records to the ZAPI request collect_only_labels bool, optional don't look for numeric metrics, only submit labels (suppresses the ErrNoMetrics error) only_cluster_instance bool, optional don't look for instance keys and assume only instance is the cluster itself"},{"location":"configure-zapi/#object-configuration-file","title":"Object configuration file","text":"

    The Zapi collector does not have the instance_key and override parameters. The optional parameter metric_type allows you to override the default metric type (uint64). The value of this parameter should be one of the metric types supported by the matrix data-structure.

    The Object configuration file (\"subtemplate\") should contain the following parameters:

    parameter type description default name string, required display name of the collector that will collect this object query string, required ZAPI query used to issue a ZAPI request object string, required short name of the object counters string list of counters to collect (see notes below) plugins list plugins and their parameters to run on the collected data export_options list parameters to pass to exporters (see notes below)"},{"location":"configure-zapi/#counters","title":"Counters","text":"

    This section defines the list of counters that will be collected. These counters can be labels, numeric metrics or histograms. The exact property of each counter is fetched from ONTAP and updated periodically.

    Some counters require a \"base-counter\" for post-processing. If the base-counter is missing, ZapiPerf will still run, but the missing data won't be exported.

    The display name of a counter can be changed with => (e.g., nfsv3_ops => ops). There's one conversion Harvest does for you by default: the instance_name counter will be renamed to the value of object.

    Counters that are stored as labels will only be exported if they are included in the export_options section.

    "},{"location":"configure-zapi/#export_options","title":"Export_options","text":"

    Parameters in this section tell the exporters how to handle the collected data.

    There are two different kinds of time-series that Harvest publishes: metrics and instance labels.

    • Metrics are numeric data with associated labels (key-value pairs). E.g. volume_read_ops_total{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\"} 123. The volume_read_ops_total metric is exporting three labels: cluster, node, and volume, and the metric value is 123.
    • Instance labels are named after their associated config object (e.g., volume_labels, qtree_labels, etc.). There will be one instance label for each object instance, and each instance label will contain a set of associated labels (key-value pairs) that are defined in the template's instance_labels parameter. E.g. volume_labels{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\", svm=\"svm1\"} 1. The volume_labels instance label is exporting four labels: cluster, node, volume, and svm. Instance labels always export a metric value of 1.

    The export_options section allows you to define how to export these time-series.

    • instance_keys (list): display names of labels to export to both metric and instance labels. For example, if you list the svm counter under instance_keys, that key-value will be included in all time-series metrics and all instance-labels.
    • instance_labels (list): display names of labels to export with the corresponding instance label config object. For example, if you want the volume counter to be exported with the volume_labels instance label, you would list volume in the instance_labels section.
    • include_all_labels (bool): exports all labels for all time-series metrics. If there are no metrics defined in the template, this option will do nothing. This option also overrides the previous two parameters. See also collect_only_labels.
    "},{"location":"configure-zapi/#zapiperf-collector","title":"ZapiPerf Collector","text":""},{"location":"configure-zapi/#zapiperf","title":"ZapiPerf","text":"

    ZapiPerf collects performance metrics from ONTAP systems using the ZAPI protocol. The collector is designed to be easily extendable to collect new objects or to collect additional counters from already configured objects.

    This collector is an extension of the Zapi collector. The major difference between them is that ZapiPerf collects only the performance (perf) APIs. Additionally, ZapiPerf always calculates final values from the deltas of two subsequent polls.

    "},{"location":"configure-zapi/#metrics_1","title":"Metrics","text":"

    The collector collects a dynamic set of metrics. The metric values are calculated from two consecutive polls (therefore, no metrics are emitted after the first poll). The calculation algorithm depends on the property and base-counter attributes of each metric; the following properties are supported:
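
    As a worked example of the rate property (numbers are illustrative): if a counter reads 100 at one poll and 700 at a poll taken 60 seconds later, the exported rate is (700 - 100) / 60 = 10 per second.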

    property formula description raw x = x_i no post-processing, value x is submitted as it is delta x = x_i - x_(i-1) delta of two poll values, x_i and x_(i-1) rate x = (x_i - x_(i-1)) / (t_i - t_(i-1)) delta divided by the interval of the two polls in seconds average x = (x_i - x_(i-1)) / (y_i - y_(i-1)) delta divided by the delta of the base counter y percent x = 100 * (x_i - x_(i-1)) / (y_i - y_(i-1)) average multiplied by 100"},{"location":"configure-zapi/#parameters_1","title":"Parameters","text":"

    The parameters of the collector are distributed across three files:

    • Harvest configuration file (default: harvest.yml)
    • ZapiPerf configuration file (default: conf/zapiperf/default.yaml)
    • Each object has its own configuration file (located in conf/zapiperf/cdot/ and conf/zapiperf/7mode/ for cDot and 7Mode systems respectively)

    Except for addr, datacenter and auth_style, all other parameters of the ZapiPerf collector can be defined in any of these three files. Parameters defined in a lower-level file override parameters in the higher-level file. This allows the user to configure each object individually, or use the same parameters for all objects.

    The full set of parameters is described below.

    "},{"location":"configure-zapi/#zapiperf-configuration-file","title":"ZapiPerf configuration file","text":"

    This configuration file (the \"template\") contains a list of objects that should be collected and the filenames of their configuration (explained in the next section).

    Additionally, this file contains the parameters that are applied as defaults to all objects. (As mentioned before, any of these parameters can be defined in the Harvest or object configuration files as well).

    parameter type description default use_insecure_tls bool, optional skip verifying TLS certificate of the target system false client_timeout duration (Go-syntax) how long to wait for server responses 30s batch_size int, optional max instances per API request 500 latency_io_reqd int, optional threshold of IOPs for calculating latency metrics (latencies based on very few IOPs are unreliable) 10 jitter duration (Go-syntax), optional Each Harvest collector runs independently, which means that at startup, each collector may send its ZAPI queries at nearly the same time. To spread out the collector startup times over a broader period, you can use jitter to randomly distribute collector startup across a specified duration. For example, a jitter of 1m starts each collector after a random delay between 0 and 60 seconds. For more details, refer to this discussion. schedule list, required the poll frequencies of the collector/object, should include exactly these three elements in the exact same order (see the sketch after the notes below): - counter duration (Go-syntax) poll frequency of updating the counter metadata cache (example value: 20m) - instance duration (Go-syntax) poll frequency of updating the instance cache (example value: 10m) - data duration (Go-syntax) poll frequency of updating the data cache (example value: 1m). Note: Harvest allows defining poll intervals at the sub-second level (e.g. 1ms); however, keep in mind the following:
    • API response of an ONTAP system can take several seconds, so the collector is likely to enter failed state if the poll interval is less than client_timeout.
    • Small poll intervals will create significant workload on the ONTAP system, as many counters are aggregated on-demand.
    • Some metric values become less significant if they are calculated for very short intervals (e.g. latencies)
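
    Putting the example frequencies above together, a schedule section might look like the sketch below (intervals are illustrative; tune them for your environment):

    schedule:\n  - counter: 20m\n  - instance: 10m\n  - data: 1m\n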

    The template should define objects in the objects section. Example:

    objects:\n  SystemNode: system_node.yaml\n  HostAdapter: hostadapter.yaml\n

    Note that for each object we only define the filename of the object configuration file. The object configuration files are located in subdirectories matching the ONTAP version that was used to create these files. It is possible to have multiple version-subdirectories for multiple ONTAP versions. At runtime, the collector will select the object configuration file that most closely matches the version of the target ONTAP system. (A mismatch is tolerated since ZapiPerf will fetch and validate counter metadata from the system.)

    "},{"location":"configure-zapi/#object-configuration-file_1","title":"Object configuration file","text":"

    The Object configuration file (\"subtemplate\") should contain the following parameters:

    parameter type description default name string display name of the collector that will collect this object object string short name of the object query string raw object name used to issue a ZAPI request counters list list of counters to collect (see notes below) instance_key string label to use as instance key (either name or uuid) override list of key-value pairs override counter properties that we get from ONTAP (allows circumventing ZAPI bugs) plugins list plugins and their parameters to run on the collected data export_options list parameters to pass to exporters (see notes below)"},{"location":"configure-zapi/#counters_1","title":"counters","text":"

    This section defines the list of counters that will be collected. These counters can be labels, numeric metrics or histograms. The exact property of each counter is fetched from ONTAP and updated periodically.

    Some counters require a \"base-counter\" for post-processing. If the base-counter is missing, ZapiPerf will still run, but the missing data won't be exported.

    The display name of a counter can be changed with => (e.g., nfsv3_ops => ops). There's one conversion Harvest does for you by default: the instance_name counter will be renamed to the value of object.

    Counters that are stored as labels will only be exported if they are included in the export_options section.

    "},{"location":"configure-zapi/#export_options_1","title":"export_options","text":"

    Parameters in this section tell the exporters how to handle the collected data.

    There are two different kinds of time-series that Harvest publishes: metrics and instance labels.

    • Metrics are numeric data with associated labels (key-value pairs). E.g. volume_read_ops_total{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\"} 123. The volume_read_ops_total metric is exporting three labels: cluster, node, and volume, and the metric value is 123.
    • Instance labels are named after their associated config object (e.g., volume_labels, nic_labels, etc.). There will be one instance label for each object instance, and each instance label will contain a set of associated labels (key-value pairs) that are defined in the template's instance_labels parameter. E.g. volume_labels{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\", svm=\"svm1\"} 1. The volume_labels instance label is exporting four labels: cluster, node, volume, and svm. Instance labels always export a metric value of 1.
    Instance labels are rarely used with ZapiPerf templates

    They can be useful for exporting labels that are not associated with a metric value.

    The export_options section allows you to define how to export these time-series.

    • instance_keys (list): display names of labels to export to both metric and instance labels. For example, if you list the svm counter under instance_keys, that key-value will be included in all time-series metrics and all instance-labels.
    • instance_labels (list): display names of labels to export with the corresponding instance label config object. For example, if you want the volume counter to be exported with the volume_labels instance label, you would list volume in the instance_labels section.
    "},{"location":"configure-zapi/#filter","title":"Filter","text":"

    This guide provides instructions on how to use the filter feature in ZapiPerf. Filtering is useful when you need to query a subset of instances. For example, suppose you have a small number of high-value volumes from which you want Harvest to collect performance metrics every five seconds. Collecting data from all volumes at this frequency would be too resource-intensive. Therefore, filtering allows you to create/modify a template that includes only the high-value volumes.

    "},{"location":"configure-zapi/#objects-excluding-workload","title":"Objects (Excluding Workload)","text":"

    In ZapiPerf templates, you can set up filters under counters. Wildcards like * are useful if you don't want to specify all instances. Please note, ONTAP Zapi filtering does not support regular expressions, only wildcard matching with *.

    For instance, to filter volume performance instances by instance name where the name is NS_svm_nvme or contains Test, use the following configuration in ZapiPerf volume.yaml under counters:

    counters:\n  ...\n  - filter:\n     - instance_name=NS_svm_nvme|instance_name=*Test*\n

    You can define multiple values within the filter array. These will be interpreted as AND conditions by ONTAP. Alternatively, you can specify a complete expression within a single array element, as described in the ONTAP filtering section below.

    ONTAP Filtering Details

    To better understand ONTAP's filtering mechanism, note that ONTAP allows the use of filter-data for the perf-object-instance-list-info-iter ZAPI.

    The filter-data is a string that signifies filter data, adhering to the format: counter_name=counter_value. You can define multiple pairs, separated by either a comma (\",\") or a pipe (\"|\").

    Here's the interpretation:

    • A comma (\",\") signifies an AND operation.
    • A pipe (\"|\") signifies an OR operation.
    • The precedence order is AND first, followed by OR.

    For instance, the filter string instance_name=volA,vserver_name=vs1|vserver_name=vs2 translates to (instance_name=volA && vserver_name=vs1) || (vserver_name=vs2).

    This filter will return instances on Vserver vs1 named volA, and all instances on Vserver vs2.

    "},{"location":"configure-zapi/#workload-templates","title":"Workload Templates","text":"

    Performance workload templates require a different syntax because instances are retrieved from the qos-workload-get-iter ZAPI instead of perf-object-instance-list-info-iter.

    The qos-workload-get-iter ZAPI supports filtering on the following fields:

    • workload-uuid
    • workload-name
    • workload-class
    • wid
    • category
    • policy-group
    • vserver
    • volume
    • lun
    • file
    • qtree
    • read-ahead
    • max-throughput
    • min-throughput
    • is-adaptive
    • is-constituent

    You can include these fields under the filter parameter. For example, to filter Workload performance instances by workload-name where the name contains NS or Test and vserver is vs1, use the following configuration in ZapiPerf workload.yaml under counters:

    counters:\n  ...\n  - filter:\n      - workload-name: \"*NS*|*Test*\"\n      - vserver: vs1\n
    "},{"location":"configure-zapi/#partial-aggregation","title":"Partial Aggregation","text":"

    There are instances when ONTAP may report partial aggregate results for certain objects (for example, during a node outage). In such cases, the ZapiPerf Collector will skip the reporting of performance counters for the affected objects.

    To determine whether partial aggregation affects an object, check the numPartials entry in the Harvest logs. If numPartials is greater than zero, it indicates that partial aggregations have occurred for that object, e.g., Collected Poller=aff-251 collector=ZapiPerf:NFSv4 instances=56 numPartials=15

    "},{"location":"dashboards/","title":"Dashboards","text":"

    Harvest can be used to import dashboards to Grafana.

    The bin/harvest grafana utility requires the address (hostname or IP) and port of the Grafana server, and a Grafana API token. The port can be omitted if Grafana is configured to redirect the URL. Use the -d flag to point to the directory that contains the dashboards.

    "},{"location":"dashboards/#grafana-api-token","title":"Grafana API token","text":"

    The utility tool asks for an API token which can be generated from the Grafana web-gui.

    Click on Configuration in the left menu bar (1), click on API Keys (2) and click on the New API Key button. Choose a Key name (3), choose Editor for role (4) and click on add (5). Copy the generated key and paste it in your terminal or add the token to the Tools section of your configuration file. (see below)

    For example, let's say your Grafana server is on http://my.grafana.server:3000 and you want to import the Prometheus-based dashboards from the grafana directory. You would run this:

    bin/harvest grafana import --addr my.grafana.server:3000\n

    Similarly, to export:

    bin/harvest grafana export --addr my.grafana.server:3000 --directory /path/to/export/directory --serverfolder grafanaFolderName\n

    By default, the dashboards are connected to a datasource named prometheus (case-sensitive). This is a datasource of the Prometheus type, defined in Grafana. However, despite the type, the datasource can have any name. If you have a Prometheus type datasource with a name different from prometheus, you can specify this name using the --datasource flag during import/export like this:

    bin/harvest grafana import --addr my.grafana.server:3000 --datasource custom_datasource_name\n
    "},{"location":"dashboards/#cli","title":"CLI","text":"

    The bin/harvest grafana tool includes CLI help when passing the --help command line argument flag like so:

    bin/harvest grafana import --help\n

    The labels argument requires more explanation.

    "},{"location":"dashboards/#labels","title":"Labels","text":"

    The grafana import --labels argument goes hand-in-hand with a poller's Labels section described here. Labels are used to add additional key-value pairs to a poller's metrics.

    When you run bin/harvest grafana import, you may optionally pass a set of labels like so:

    bin/harvest grafana import --labels org --labels dept

    This will cause Harvest to do the following for each dashboard:

    1. Parse each dashboard and add a new variable for each label passed on the command line
    2. Modify each dashboard variable to use the new label variable(s) in a chained query.

    Here's an example:

    bin/harvest grafana import --labels \"org,dept\"\n

    This will add the Org and Dept variables, as shown below, and modify the existing variables as shown.


    "},{"location":"dashboards/#creating-a-custom-grafana-dashboard-with-harvest-metrics-stored-in-prometheus","title":"Creating a Custom Grafana Dashboard with Harvest Metrics Stored in Prometheus","text":"

    This guide assumes that you have already installed and configured Harvest, Prometheus, and Grafana. Instead of creating a new Grafana dashboard from scratch, you might find it more efficient to clone and modify an existing one. Alternatively, you can copy/paste a panel from an existing dashboard into your new one.

    Harvest collects a wide range of metrics from ONTAP and StorageGRID clusters, which are documented here. These metrics can be used to create dashboards in Grafana.

    "},{"location":"dashboards/#step-1-confirm-that-prometheus-is-receiving-metrics-from-harvest","title":"Step 1: Confirm that Prometheus is Receiving Metrics from Harvest","text":"

    Before creating a dashboard, make sure the relevant metric is present via a PromQL query in the Prometheus UI. If the metric is not present, navigate to Status -> Targets in the Prometheus UI to check the state and any potential errors of the scrape target.
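
    For example, reusing the sensor metrics shown earlier in this document, a quick sanity query in the Prometheus UI might be (the label value is illustrative):

    sensor_value{datacenter=\"WDRF\"}\n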

    "},{"location":"dashboards/#step-2-add-prometheus-as-a-data-source-in-grafana","title":"Step 2: Add Prometheus as a Data Source in Grafana","text":"

    If you haven't already, add Prometheus as a data source in Grafana:

    1. In the Grafana UI, go to Configuration > Data Sources.
    2. Click Add data source.
    3. Select Prometheus.
    4. Enter the URL of your Prometheus server, and click Save & Test.
    "},{"location":"dashboards/#step-3-create-a-new-dashboard","title":"Step 3: Create a New Dashboard","text":"

    Now you're ready to create a new dashboard:

    1. In the Grafana UI, click the + icon on the left menu and select Dashboard.
    2. Click Add new panel.
    "},{"location":"dashboards/#step-4-add-queries-to-visualize-harvest-metrics","title":"Step 4: Add Queries to Visualize Harvest Metrics","text":"

    In the new panel, you can add queries to visualize the Harvest metrics:

    1. In the query editor, select Prometheus as the data source.
    2. Write your query to visualize the Harvest counters. Prometheus uses a language called PromQL for querying data. The exact query will depend on the specific Harvest counters you want to visualize. You can refer to the Harvest metrics documentation for details on the available metrics.
    3. Adjust the visualization settings as needed, and click Apply to add the panel to the dashboard.
    "},{"location":"dashboards/#step-5-save-the-dashboard","title":"Step 5: Save the Dashboard","text":"

    Once you're satisfied with the panels and layout of your dashboard, don't forget to save it. You can then share it with others, or keep it for your own use.

    Remember, the specifics of these steps can vary depending on your exact setup and requirements. This guide provides a general approach, but you may need to adjust it for your situation.

    "},{"location":"influxdb-exporter/","title":"InfluxDB Exporter","text":"InfluxDB Install

    The information below describes how to setup Harvest's InfluxDB exporter. If you need help installing or setting up InfluxDB, check out their documentation.

    "},{"location":"influxdb-exporter/#overview","title":"Overview","text":"

    The InfluxDB Exporter will format metrics into InfluxDB's line protocol and write them into a bucket. The Exporter is compatible with InfluxDB v2.0. For an explanation of bucket, org, and precision, see the InfluxDB API documentation.

    If you are monitoring both CDOT and 7mode clusters, it is strongly recommended to use two different buckets.

    "},{"location":"influxdb-exporter/#parameters","title":"Parameters","text":"

    An overview of all parameters is provided below. Exactly one of url or addr must be provided; they are mutually exclusive. If addr is specified, it should be a valid TCP address or hostname of the InfluxDB server and should not include the scheme. When using addr, the bucket, org, and token key/values are required.

    addr only works with HTTP. If you need to use HTTPS, you should use url instead.

    If url is specified, you must include all arguments in the url. Harvest does no additional processing and uses exactly what you specify (e.g. url: https://influxdb.example.com:8086/write?db=netapp&u=user&p=pass&precision=2). When using url, the bucket, org, port, and precision fields will be ignored.

    parameter type description default url string URL of the database, format: SCHEME://HOST[:PORT] addr string address of the database, format: HOST (HTTP only) port int, optional port of the database 8086 bucket string, required with addr InfluxDB bucket to write org string, required with addr InfluxDB organization name precision string, required with addr Preferred timestamp precision in seconds 2 client_timeout int, optional client timeout in seconds 5 token string token for authentication"},{"location":"influxdb-exporter/#example","title":"Example","text":"

    snippet from harvest.yml using addr: (supports HTTP only)

    Exporters:\n  my_influx:\n    exporter: InfluxDB\n    addr: localhost\n    bucket: harvest\n    org: harvest\n    token: ZTTrt%24@#WNFM2VZTTNNT25wZWUdtUmhBZEdVUmd3dl@# \n

    snippet from harvest.yml using url: (supports both HTTP and HTTPS)

    Exporters:\n  influx2:\n    exporter: InfluxDB\n    url: https://localhost:8086/api/v2/write?org=harvest&bucket=harvest&precision=s\n    token: my-token== \n

    Notice: InfluxDB stores a token in ~/.influxdbv2/configs, but you can also retrieve it from the UI (usually served on localhost:8086): click \"Data\" in the left navigation bar, then \"Tokens\".
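
    If you prefer the command line, the influx CLI can also list existing tokens (this assumes the CLI is installed and already configured against your InfluxDB instance):

    influx auth list\n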

    "},{"location":"license/","title":"License","text":"

    Harvest's License

    "},{"location":"manage-harvest/","title":"Manage Harvest Pollers","text":"

    Coming Soon

    "},{"location":"monitor-harvest/","title":"Monitor Harvest","text":""},{"location":"monitor-harvest/#harvest-metadata","title":"Harvest Metadata","text":"

    Harvest publishes metadata metrics about its key components. Many of these metrics are used in the Harvest Metadata dashboard.

    If you want to understand more about these metrics, read on!

    Metrics are published for:

    • collectors
    • pollers
    • clusters being monitored
    • exporters

    Here's a high-level summary of the metadata metrics Harvest publishes, with details below.

    Metric Description Units metadata_collector_api_time amount of time to collect data from monitored cluster object microseconds metadata_collector_instances number of objects collected from monitored cluster scalar metadata_collector_metrics number of counters collected from monitored cluster scalar metadata_collector_parse_time amount of time to parse XML, JSON, etc. for cluster object microseconds metadata_collector_plugin_time amount of time for all plugins to post-process metrics microseconds metadata_collector_poll_time amount of time it took for the poll to finish microseconds metadata_collector_task_time amount of time it took for each collector's subtasks to complete microseconds metadata_component_count number of metrics collected for each object scalar metadata_component_status status of the collector - 0 means running, 1 means standby, 2 means failed enum metadata_exporter_count number of metrics and labels exported scalar metadata_exporter_time amount of time it took to render, export, and serve exported data microseconds metadata_target_goroutines number of goroutines that exist within the poller scalar metadata_target_status status of the system being monitored. 0 means reachable, 1 means unreachable enum metadata_collector_calc_time amount of time it took to compute metrics between two successive polls, specifically using properties like raw, delta, rate, average, and percent. This metric is available for ZapiPerf/RestPerf collectors. microseconds metadata_collector_skips number of metrics that were not calculated between two successive polls. This metric is available for ZapiPerf/RestPerf collectors. scalar"},{"location":"monitor-harvest/#collector-metadata","title":"Collector Metadata","text":"

    A poller publishes the metadata metrics for each collector and exporter associated with it.

    Let's say we start a poller with the Zapi collector and the out-of-the-box default.yaml exporting metrics to Prometheus. That means you will be monitoring 22 different objects (uncommented lines in default.yaml as of 23.02).

    When we start this poller, we expect it to export 23 metadata_component_status metrics: one for each of the 22 objects, plus one for the Prometheus exporter.

    The following curl confirms there are 23 metadata_component_status metrics reported.

    curl -s http://localhost:12990/metrics | grep -v \"#\" | grep metadata_component_status | wc -l\n      23\n

    These metrics also tell us which collectors are in a standby or failed state. For example, filtering on components not in the running state shows the following, since this cluster doesn't have any ClusterPeers, SecurityAuditDestinations, or SnapMirrors. The reason is listed as no instances and the metric value is 1, which means standby.

    curl -s http://localhost:12990/metrics | grep -v \"#\" | grep metadata_component_status | grep -Ev \"running\"\nmetadata_component_status{name=\"Zapi\", reason=\"no instances\",target=\"ClusterPeer\",type=\"collector\",version=\"23.04.1417\"} 1\nmetadata_component_status{name=\"Zapi\", reason=\"no instances\",target=\"SecurityAuditDestination\",type=\"collector\",version=\"23.04.1417\"} 1\nmetadata_component_status{name=\"Zapi\", reason=\"no instances\",target=\"SnapMirror\",type=\"collector\",version=\"23.04.1417\"} 1\n

    The log files for the poller show a similar story. The poller starts with 22 collectors, but drops to 19 after three of the collectors go to standby because there are no instances to collect.

    2023-04-17T13:14:18-04:00 INF ./poller.go:539 > updated status, up collectors: 22 (of 22), up exporters: 1 (of 1) Poller=u2\n2023-04-17T13:14:18-04:00 INF collector/collector.go:342 > no instances, entering standby Poller=u2 collector=Zapi:SecurityAuditDestination task=data\n2023-04-17T13:14:18-04:00 INF collector/collector.go:342 > no instances, entering standby Poller=u2 collector=Zapi:ClusterPeer task=data\n2023-04-17T13:14:18-04:00 INF collector/collector.go:342 > no instances, entering standby Poller=u2 collector=Zapi:SnapMirror task=data\n2023-04-17T13:15:18-04:00 INF ./poller.go:539 > updated status, up collectors: 19 (of 22), up exporters: 1 (of 1) Poller=u2\n
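
    The same information is available in Prometheus. Based on the status values described above (1 means standby), a query such as the following lists collectors currently in standby (a sketch; add label filters as needed):

    metadata_component_status == 1\n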
    "},{"location":"ontap-metrics/","title":"ONTAP Metrics","text":"

    This document describes how Harvest metrics map to their corresponding ONTAP ZAPI and REST counters, including:

    • Details about which Harvest metrics each dashboard uses. These can be generated on demand by running bin/harvest grafana metrics. See #1577 for details.

    • More information about ONTAP REST performance counters can be found here.

    Creation Date: 2024-Aug-12\nONTAP Version: 9.15.1\n
    "},{"location":"ontap-metrics/#understanding-the-structure","title":"Understanding the structure","text":"

    Below is an annotated example of how to interpret the structure of each of the metrics.

    disk_io_queued Name of the metric exported by Harvest

    Number of I/Os queued to the disk but not yet issued Description of the ONTAP metric

    • API will be one of REST or ZAPI depending on which collector is used to collect the metric
    • Endpoint name of the REST or ZAPI API used to collect this metric
    • Metric name of the ONTAP metric
    • Template path of the template that collects the metric

    Performance related metrics also include:

    • Unit the unit of the metric
    • Type describes how to calculate a cooked metric from two consecutive ONTAP raw metrics
    • Base some counters require a base counter for post-processing. When required, this property lists the base counter
    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#metrics","title":"Metrics","text":""},{"location":"ontap-metrics/#aggr_disk_busy","title":"aggr_disk_busy","text":"

    The utilization percent of the disk. aggr_disk_busy is disk_busy aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent disk_busy_percentUnit: percentType: percentBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_busyUnit: percentType: percentBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_capacity","title":"aggr_disk_capacity","text":"

    Disk capacity in MB. aggr_disk_capacity is disk_capacity aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent capacityUnit: mbType: rawBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_capacityUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_cp_read_chain","title":"aggr_disk_cp_read_chain","text":"

    Average number of blocks transferred in each consistency point read operation during a CP. aggr_disk_cp_read_chain is disk_cp_read_chain aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_chainUnit: noneType: averageBase: cp_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_chainUnit: noneType: averageBase: cp_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_cp_read_latency","title":"aggr_disk_cp_read_latency","text":"

    Average latency per block in microseconds for consistency point read operations. aggr_disk_cp_read_latency is disk_cp_read_latency aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_cp_reads","title":"aggr_disk_cp_reads","text":"

    Number of disk read operations initiated each second for consistency point processing. aggr_disk_cp_reads is disk_cp_reads aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_io_pending","title":"aggr_disk_io_pending","text":"

    Average number of I/Os issued to the disk for which we have not yet received the response. aggr_disk_io_pending is disk_io_pending aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_io_queued","title":"aggr_disk_io_queued","text":"

    Number of I/Os queued to the disk but not yet issued. aggr_disk_io_queued is disk_io_queued aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_busy","title":"aggr_disk_max_busy","text":"

    The utilization percent of the disk. aggr_disk_max_busy is the maximum of disk_busy for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent disk_busy_percentUnit: percentType: percentBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_busyUnit: percentType: percentBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_capacity","title":"aggr_disk_max_capacity","text":"

    Disk capacity in MB. aggr_disk_max_capacity is the maximum of disk_capacity for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent capacityUnit: mbType: rawBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_capacityUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_cp_read_chain","title":"aggr_disk_max_cp_read_chain","text":"

    Average number of blocks transferred in each consistency point read operation during a CP. aggr_disk_max_cp_read_chain is the maximum of disk_cp_read_chain for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_chainUnit: noneType: averageBase: cp_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_chainUnit: noneType: averageBase: cp_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_cp_read_latency","title":"aggr_disk_max_cp_read_latency","text":"

    Average latency per block in microseconds for consistency point read operations. aggr_disk_max_cp_read_latency is the maximum of disk_cp_read_latency for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_cp_reads","title":"aggr_disk_max_cp_reads","text":"

    Number of disk read operations initiated each second for consistency point processing. aggr_disk_max_cp_reads is the maximum of disk_cp_reads for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_io_pending","title":"aggr_disk_max_io_pending","text":"

    Average number of I/Os issued to the disk for which we have not yet received the response. aggr_disk_max_io_pending is the maximum of disk_io_pending for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_io_queued","title":"aggr_disk_max_io_queued","text":"

    Number of I/Os queued to the disk but not yet issued. aggr_disk_max_io_queued is the maximum of disk_io_queued for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_total_data","title":"aggr_disk_max_total_data","text":"

    Total throughput for user operations per second. aggr_disk_max_total_data is the maximum of disk_total_data for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_total_transfers","title":"aggr_disk_max_total_transfers","text":"

    Total number of disk operations involving data transfer initiated per second. aggr_disk_max_total_transfers is the maximum of disk_total_transfers for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_transfer_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_transfersUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_read_blocks","title":"aggr_disk_max_user_read_blocks","text":"

    Number of blocks transferred for user read operations per second. aggr_disk_max_user_read_blocks is the maximum of disk_user_read_blocks for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_read_chain","title":"aggr_disk_max_user_read_chain","text":"

    Average number of blocks transferred in each user read operation. aggr_disk_max_user_read_chain is the maximum of disk_user_read_chain for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_chainUnit: noneType: averageBase: user_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_chainUnit: noneType: averageBase: user_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_read_latency","title":"aggr_disk_max_user_read_latency","text":"

    Average latency per block in microseconds for user read operations. aggr_disk_max_user_read_latency is the maximum of disk_user_read_latency for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_reads","title":"aggr_disk_max_user_reads","text":"

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. aggr_disk_max_user_reads is the maximum of disk_user_reads for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_write_blocks","title":"aggr_disk_max_user_write_blocks","text":"

    Number of blocks transferred for user write operations per second. aggr_disk_max_user_write_blocks is the maximum of disk_user_write_blocks for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_write_chain","title":"aggr_disk_max_user_write_chain","text":"

    Average number of blocks transferred in each user write operation. aggr_disk_max_user_write_chain is the maximum of disk_user_write_chain for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_chainUnit: noneType: averageBase: user_write_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_chainUnit: noneType: averageBase: user_writes conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_write_latency","title":"aggr_disk_max_user_write_latency","text":"

    Average latency per block in microseconds for user write operations. aggr_disk_max_user_write_latency is the maximum of disk_user_write_latency for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_writes","title":"aggr_disk_max_user_writes","text":"

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. aggr_disk_max_user_writes is the maximum of disk_user_writes for label aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_writesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_total_data","title":"aggr_disk_total_data","text":"

    Total throughput for user operations per second. aggr_disk_total_data is disk_total_data aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_total_transfers","title":"aggr_disk_total_transfers","text":"

    Total number of disk operations involving data transfer initiated per second. aggr_disk_total_transfers is disk_total_transfers aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_transfer_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_transfersUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_read_blocks","title":"aggr_disk_user_read_blocks","text":"

    Number of blocks transferred for user read operations per second. aggr_disk_user_read_blocks is disk_user_read_blocks aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_read_chain","title":"aggr_disk_user_read_chain","text":"

    Average number of blocks transferred in each user read operation. aggr_disk_user_read_chain is disk_user_read_chain aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_chainUnit: noneType: averageBase: user_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_chainUnit: noneType: averageBase: user_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_read_latency","title":"aggr_disk_user_read_latency","text":"

    Average latency per block in microseconds for user read operations. aggr_disk_user_read_latency is disk_user_read_latency aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_reads","title":"aggr_disk_user_reads","text":"

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. aggr_disk_user_reads is disk_user_reads aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_write_blocks","title":"aggr_disk_user_write_blocks","text":"

    Number of blocks transferred for user write operations per second. aggr_disk_user_write_blocks is disk_user_write_blocks aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_write_chain","title":"aggr_disk_user_write_chain","text":"

    Average number of blocks transferred in each user write operation. aggr_disk_user_write_chain is disk_user_write_chain aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_chainUnit: noneType: averageBase: user_write_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_chainUnit: noneType: averageBase: user_writes conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_write_latency","title":"aggr_disk_user_write_latency","text":"

    Average latency per block in microseconds for user write operations. aggr_disk_user_write_latency is disk_user_write_latency aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_writes","title":"aggr_disk_user_writes","text":"

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. aggr_disk_user_writes is disk_user_writes aggregated by aggr.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_writesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_efficiency_savings","title":"aggr_efficiency_savings","text":"

    Space saved by storage efficiencies (logical_used - used)

    API Endpoint Metric Template REST api/storage/aggregates space.efficiency.savings conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_efficiency_savings_wo_snapshots","title":"aggr_efficiency_savings_wo_snapshots","text":"

    Space saved by storage efficiencies (logical_used - used)

    API Endpoint Metric Template REST api/storage/aggregates space.efficiency_without_snapshots.savings conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_efficiency_savings_wo_snapshots_flexclones","title":"aggr_efficiency_savings_wo_snapshots_flexclones","text":"

    Space saved by storage efficiencies (logical_used - used)

    API Endpoint Metric Template REST api/storage/aggregates space.efficiency_without_snapshots_flexclones.savings conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_hybrid_cache_size_total","title":"aggr_hybrid_cache_size_total","text":"

    Total usable space in bytes of SSD cache. Only provided when hybrid_cache.enabled is 'true'.

    API Endpoint Metric Template REST api/storage/aggregates block_storage.hybrid_cache.size conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.hybrid-cache-size-total conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_hybrid_disk_count","title":"aggr_hybrid_disk_count","text":"

    Number of disks used in the cache tier of the aggregate. Only provided when hybrid_cache.enabled is 'true'.

    API Endpoint Metric Template REST api/storage/aggregates block_storage.hybrid_cache.disk_count conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_files_private_used","title":"aggr_inode_files_private_used","text":"

    Number of system metadata files used. If the referenced file system is restricted or offline, a value of 0 is returned. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

    API Endpoint Metric Template REST api/storage/aggregates inode_attributes.files_private_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.files-private-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_files_total","title":"aggr_inode_files_total","text":"

    Maximum number of user-visible files that this referenced file system can currently hold. If the referenced file system is restricted or offline, a value of 0 is returned.

    API Endpoint Metric Template REST api/storage/aggregates inode_attributes.files_total conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.files-total conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_files_used","title":"aggr_inode_files_used","text":"

    Number of user-visible files used in the referenced file system. If the referenced file system is restricted or offline, a value of 0 is returned.

    API Endpoint Metric Template REST api/storage/aggregates inode_attributes.files_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.files-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_inodefile_private_capacity","title":"aggr_inode_inodefile_private_capacity","text":"

    Number of files that can currently be stored on disk for system metadata files. This number will dynamically increase as more system files are created. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

    API Endpoint Metric Template REST api/storage/aggregates inode_attributes.file_private_capacity conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.inodefile-private-capacity conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_inodefile_public_capacity","title":"aggr_inode_inodefile_public_capacity","text":"

    Number of files that can currently be stored on disk for user-visible files. This number will dynamically increase as more user-visible files are created. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

    API Endpoint Metric Template REST api/storage/aggregates inode_attributes.file_public_capacity conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.inodefile-public-capacity conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_maxfiles_available","title":"aggr_inode_maxfiles_available","text":"

    The count of the maximum number of user-visible files currently allowable on the referenced file system.

    API Endpoint Metric Template REST api/storage/aggregates inode_attributes.max_files_available conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.maxfiles-available conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_maxfiles_possible","title":"aggr_inode_maxfiles_possible","text":"

    The largest value to which the maxfiles-available parameter can be increased by reconfiguration, on the referenced file system.

    API Endpoint Metric Template REST api/storage/aggregates inode_attributes.max_files_possible conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.maxfiles-possible conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_maxfiles_used","title":"aggr_inode_maxfiles_used","text":"

    The number of user-visible files currently in use on the referenced file system.

    API Endpoint Metric Template REST api/storage/aggregates inode_attributes.max_files_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.maxfiles-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_used_percent","title":"aggr_inode_used_percent","text":"

    The percentage of disk space currently in use based on user-visible file count on the referenced file system.

    API Endpoint Metric Template REST api/storage/aggregates inode_attributes.used_percent conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.percent-inode-used-capacity conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_logical_used_wo_snapshots","title":"aggr_logical_used_wo_snapshots","text":"

    Logical used

    API Endpoint Metric Template REST api/storage/aggregates space.efficiency_without_snapshots.logical_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-efficiency-get-iter aggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-logical-used-wo-snapshots conf/zapi/cdot/9.9.0/aggr_efficiency.yaml"},{"location":"ontap-metrics/#aggr_logical_used_wo_snapshots_flexclones","title":"aggr_logical_used_wo_snapshots_flexclones","text":"

    Logical used

    API Endpoint Metric Template REST api/storage/aggregates space.efficiency_without_snapshots_flexclones.logical_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-efficiency-get-iter aggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-logical-used-wo-snapshots-flexclones conf/zapi/cdot/9.9.0/aggr_efficiency.yaml"},{"location":"ontap-metrics/#aggr_object_store_logical_used","title":"aggr_object_store_logical_used","text":"

    Logical space usage of aggregates in the attached object store.

    API Endpoint Metric Template REST api/private/cli/aggr/show-space object_store_logical_used conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_object_store_physical_used","title":"aggr_object_store_physical_used","text":"

    Physical space usage of aggregates in the attached object store.

    API Endpoint Metric Template REST api/private/cli/aggr/show-space object_store_physical_used conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_physical_used_wo_snapshots","title":"aggr_physical_used_wo_snapshots","text":"

    Total Data Reduction Physical Used Without Snapshots

    API Endpoint Metric Template REST api/storage/aggregates space.efficiency_without_snapshots.logical_used, space.efficiency_without_snapshots.savings conf/rest/9.12.0/aggr.yaml ZAPI aggr-efficiency-get-iter aggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-physical-used-wo-snapshots conf/zapi/cdot/9.9.0/aggr_efficiency.yaml"},{"location":"ontap-metrics/#aggr_physical_used_wo_snapshots_flexclones","title":"aggr_physical_used_wo_snapshots_flexclones","text":"

    Total Data Reduction Physical Used without snapshots and flexclones

    API Endpoint Metric Template REST api/storage/aggregates space.efficiency_without_snapshots_flexclones.logical_used, space.efficiency_without_snapshots_flexclones.savings conf/rest/9.12.0/aggr.yaml ZAPI aggr-efficiency-get-iter aggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-physical-used-wo-snapshots-flexclones conf/zapi/cdot/9.9.0/aggr_efficiency.yaml"},{"location":"ontap-metrics/#aggr_power","title":"aggr_power","text":"

    Power consumed by aggregate in Watts.

    API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_primary_disk_count","title":"aggr_primary_disk_count","text":"

    Number of disks used in the aggregate. This includes parity disks, but excludes disks in the hybrid cache.

    API Endpoint Metric Template REST api/storage/aggregates block_storage.primary.disk_count conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_raid_disk_count","title":"aggr_raid_disk_count","text":"

    Number of disks in the aggregate.

    API Endpoint Metric Template REST api/storage/aggregates block_storage.primary.disk_count, block_storage.hybrid_cache.disk_count conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-raid-attributes.disk-count conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_raid_plex_count","title":"aggr_raid_plex_count","text":"

    Number of plexes in the aggregate

    API Endpoint Metric Template REST api/storage/aggregates block_storage.plexes.# conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-raid-attributes.plex-count conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_raid_size","title":"aggr_raid_size","text":"

    Option to specify the maximum number of disks that can be included in a RAID group.

    API Endpoint Metric Template REST api/storage/aggregates block_storage.primary.raid_size conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-raid-attributes.raid-size conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_files_total","title":"aggr_snapshot_files_total","text":"

    Total files allowed in Snapshot copies

    API Endpoint Metric Template REST api/storage/aggregates snapshot.files_total conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.files-total conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_files_used","title":"aggr_snapshot_files_used","text":"

    Total files created in Snapshot copies

    API Endpoint Metric Template REST api/storage/aggregates snapshot.files_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.files-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_inode_used_percent","title":"aggr_snapshot_inode_used_percent","text":"

    The percentage of disk space currently in use based on user-visible file (inode) count on the referenced file system.

    API Endpoint Metric Template ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.percent-inode-used-capacity conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_maxfiles_available","title":"aggr_snapshot_maxfiles_available","text":"

    Maximum files available for Snapshot copies

    API Endpoint Metric Template REST api/storage/aggregates snapshot.max_files_available conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.maxfiles-available conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_maxfiles_possible","title":"aggr_snapshot_maxfiles_possible","text":"

    The largest value to which the maxfiles-available parameter can be increased by reconfiguration, on the referenced file system.

    API Endpoint Metric Template REST api/storage/aggregates snapshot.max_files_available, snapshot.max_files_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.maxfiles-possible conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_maxfiles_used","title":"aggr_snapshot_maxfiles_used","text":"

    Files in use by Snapshot copies

    API Endpoint Metric Template REST api/storage/aggregates snapshot.max_files_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.maxfiles-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_reserve_percent","title":"aggr_snapshot_reserve_percent","text":"

    Percentage of space reserved for Snapshot copies

    API Endpoint Metric Template REST api/storage/aggregates space.snapshot.reserve_percent conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.snapshot-reserve-percent conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_size_available","title":"aggr_snapshot_size_available","text":"

    Available space for Snapshot copies in bytes

    API Endpoint Metric Template REST api/storage/aggregates space.snapshot.available conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.size-available conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_size_total","title":"aggr_snapshot_size_total","text":"

    Total space for Snapshot copies in bytes

    API Endpoint Metric Template REST api/storage/aggregates space.snapshot.total conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.size-total conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_size_used","title":"aggr_snapshot_size_used","text":"

    Space used by Snapshot copies in bytes

    API Endpoint Metric Template REST api/storage/aggregates space.snapshot.used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.size-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_used_percent","title":"aggr_snapshot_used_percent","text":"

    Percentage of disk space used by Snapshot copies

    API Endpoint Metric Template REST api/storage/aggregates space.snapshot.used_percent conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.percent-used-capacity conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_available","title":"aggr_space_available","text":"

    Space available in bytes.

    API Endpoint Metric Template REST api/storage/aggregates space.block_storage.available conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.size-available conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_capacity_tier_used","title":"aggr_space_capacity_tier_used","text":"

    Used space in bytes in the cloud store. Only applicable for aggregates with a cloud store tier.

    API Endpoint Metric Template REST api/storage/aggregates space.cloud_storage.used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.capacity-tier-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_data_compacted_count","title":"aggr_space_data_compacted_count","text":"

    Amount of compacted data in bytes.

    API Endpoint Metric Template REST api/storage/aggregates space.block_storage.data_compacted_count conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.data-compacted-count conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_data_compaction_saved","title":"aggr_space_data_compaction_saved","text":"

    Space saved in bytes by compacting the data.

    API Endpoint Metric Template REST api/storage/aggregates space.block_storage.data_compaction_space_saved conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.data-compaction-space-saved conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_data_compaction_saved_percent","title":"aggr_space_data_compaction_saved_percent","text":"

    Percentage saved by compacting the data.

    API Endpoint Metric Template REST api/storage/aggregates space.block_storage.data_compaction_space_saved_percent conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.data-compaction-space-saved-percent conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_performance_tier_inactive_user_data","title":"aggr_space_performance_tier_inactive_user_data","text":"

    The size that is physically used in the block storage and has a cold temperature, in bytes. This property is only supported if the aggregate is either attached to a cloud store or can be attached to a cloud store. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either block_storage.inactive_user_data or **.

    API Endpoint Metric Template REST api/storage/aggregates space.block_storage.inactive_user_data conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.performance-tier-inactive-user-data conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_performance_tier_inactive_user_data_percent","title":"aggr_space_performance_tier_inactive_user_data_percent","text":"

    The percentage of inactive user data in the block storage. This property is only supported if the aggregate is either attached to a cloud store or can be attached to a cloud store. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either block_storage.inactive_user_data_percent or **.

    API Endpoint Metric Template REST api/storage/aggregates space.block_storage.inactive_user_data_percent conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.performance-tier-inactive-user-data-percent conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_performance_tier_used","title":"aggr_space_performance_tier_used","text":"

    A summation of volume footprints (including volume guarantees), in bytes. This includes all of the volume footprints in the block_storage tier and the cloud_storage tier. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

    API Endpoint Metric Template REST api/storage/aggregates space.footprint conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_performance_tier_used_percent","title":"aggr_space_performance_tier_used_percent","text":"

    A summation of volume footprints inside the aggregate, as a percentage. A volume's footprint is the amount of space being used for the volume in the aggregate.

    API Endpoint Metric Template REST api/storage/aggregates space.footprint_percent conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_physical_used","title":"aggr_space_physical_used","text":"

    Total physical used size of an aggregate in bytes.

    API Endpoint Metric Template REST api/storage/aggregates space.block_storage.physical_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.physical-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_physical_used_percent","title":"aggr_space_physical_used_percent","text":"

    Physical used percentage.

    API Endpoint Metric Template REST api/storage/aggregates space.block_storage.physical_used_percent conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.physical-used-percent conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_reserved","title":"aggr_space_reserved","text":"

    The total disk space in bytes that is reserved on the referenced file system. The reserved space is already counted in the used space, so this element can be used to see what portion of the used space represents space reserved for future use.

    API Endpoint Metric Template ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.total-reserved-space conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_sis_saved","title":"aggr_space_sis_saved","text":"

    Amount of space saved in bytes by storage efficiency.

    API Endpoint Metric Template REST api/storage/aggregates space.block_storage.volume_deduplication_space_saved conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.sis-space-saved conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_sis_saved_percent","title":"aggr_space_sis_saved_percent","text":"

    Percentage of space saved by storage efficiency.

    API Endpoint Metric Template REST api/storage/aggregates space.block_storage.volume_deduplication_space_saved_percent conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.sis-space-saved-percent conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_sis_shared_count","title":"aggr_space_sis_shared_count","text":"

    Amount of shared bytes counted by storage efficiency.

    API Endpoint Metric Template REST api/storage/aggregates space.block_storage.volume_deduplication_shared_count conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.sis-shared-count conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_total","title":"aggr_space_total","text":"

    Total usable space in bytes, not including WAFL reserve and aggregate Snapshot copy reserve.

    API Endpoint Metric Template REST api/storage/aggregates space.block_storage.size conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.size-total conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_used","title":"aggr_space_used","text":"

    Space used or reserved in bytes. Includes volume guarantees and aggregate metadata.

    API Endpoint Metric Template REST api/storage/aggregates space.block_storage.used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.size-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_used_percent","title":"aggr_space_used_percent","text":"

    The percentage of disk space currently in use on the referenced file system

    API Endpoint Metric Template REST api/storage/aggregates space.block_storage.used, space.block_storage.size conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.percent-used-capacity conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_total_logical_used","title":"aggr_total_logical_used","text":"

    Logical used

    API Endpoint Metric Template REST api/storage/aggregates space.efficiency.logical_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-efficiency-get-iter aggr-efficiency-info.aggr-efficiency-cumulative-info.total-logical-used conf/zapi/cdot/9.9.0/aggr_efficiency.yaml"},{"location":"ontap-metrics/#aggr_total_physical_used","title":"aggr_total_physical_used","text":"

    Total Physical Used

    API Endpoint Metric Template REST api/storage/aggregates space.efficiency.logical_used, space.efficiency.savings conf/rest/9.12.0/aggr.yaml ZAPI aggr-efficiency-get-iter aggr-efficiency-info.aggr-efficiency-cumulative-info.total-physical-used conf/zapi/cdot/9.9.0/aggr_efficiency.yaml"},{"location":"ontap-metrics/#aggr_volume_count","title":"aggr_volume_count","text":"

    The aggregate's volume count, which includes both FlexVols and FlexGroup constituents.

    API Endpoint Metric Template REST api/storage/aggregates volume_count conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-volume-count-attributes.flexvol-count conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#cifs_session_connection_count","title":"cifs_session_connection_count","text":"

    A counter used to track requests that are sent to the volumes to the node.

    API Endpoint Metric Template REST api/protocols/cifs/sessions connection_count conf/rest/9.8.0/cifs_session.yaml ZAPI cifs-session-get-iter cifs-session.connection-count conf/zapi/cdot/9.8.0/cifs_session.yaml"},{"location":"ontap-metrics/#cloud_target_used","title":"cloud_target_used","text":"

    The amount of cloud space used by all the aggregates attached to the target, in bytes. This field is only populated for FabricPool targets. The value is recalculated once every 5 minutes.

    API Endpoint Metric Template REST api/cloud/targets used conf/rest/9.12.0/cloud_target.yaml ZAPI aggr-object-store-config-get-iter aggr-object-store-config-info.used-space conf/zapi/cdot/9.10.0/aggr_object_store_config.yaml"},{"location":"ontap-metrics/#cluster_new_status","title":"cluster_new_status","text":"

    It is an indicator of the overall health status of the cluster, with a value of 1 indicating a healthy status and a value of 0 indicating an unhealthy status.

    API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/status.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/status.yaml"},{"location":"ontap-metrics/#cluster_subsystem_outstanding_alerts","title":"cluster_subsystem_outstanding_alerts","text":"

    Number of outstanding alerts

    API Endpoint Metric Template REST api/private/cli/system/health/subsystem outstanding_alert_count conf/rest/9.12.0/subsystem.yaml ZAPI diagnosis-subsystem-config-get-iter diagnosis-subsystem-config-info.outstanding-alert-count conf/zapi/cdot/9.8.0/subsystem.yaml"},{"location":"ontap-metrics/#cluster_subsystem_suppressed_alerts","title":"cluster_subsystem_suppressed_alerts","text":"

    Number of suppressed alerts

    API Endpoint Metric Template REST api/private/cli/system/health/subsystem suppressed_alert_count conf/rest/9.12.0/subsystem.yaml ZAPI diagnosis-subsystem-config-get-iter diagnosis-subsystem-config-info.suppressed-alert-count conf/zapi/cdot/9.8.0/subsystem.yaml"},{"location":"ontap-metrics/#copy_manager_bce_copy_count_curr","title":"copy_manager_bce_copy_count_curr","text":"

    Current number of copy requests being processed by the Block Copy Engine.

    API Endpoint Metric Template REST api/cluster/counter/tables/copy_manager block_copy_engine_current_copy_countUnit: noneType: deltaBase: conf/restperf/9.12.0/copy_manager.yaml ZAPI perf-object-get-instances copy_manager bce_copy_count_currUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/copy_manager.yaml"},{"location":"ontap-metrics/#copy_manager_kb_copied","title":"copy_manager_kb_copied","text":"

    Sum of kilo-bytes copied.

    API Endpoint Metric Template REST api/cluster/counter/tables/copy_manager KB_copiedUnit: noneType: deltaBase: conf/restperf/9.12.0/copy_manager.yaml ZAPI perf-object-get-instances copy_manager KB_copiedUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/copy_manager.yaml"},{"location":"ontap-metrics/#copy_manager_ocs_copy_count_curr","title":"copy_manager_ocs_copy_count_curr","text":"

    Current number of copy requests being processed by the ONTAP copy subsystem.

    API Endpoint Metric Template REST api/cluster/counter/tables/copy_manager ontap_copy_subsystem_current_copy_countUnit: noneType: deltaBase: conf/restperf/9.12.0/copy_manager.yaml ZAPI perf-object-get-instances copy_manager ocs_copy_count_currUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/copy_manager.yaml"},{"location":"ontap-metrics/#copy_manager_sce_copy_count_curr","title":"copy_manager_sce_copy_count_curr","text":"

    Current number of copy requests being processed by the System Continuous Engineering.

    API Endpoint Metric Template REST api/cluster/counter/tables/copy_manager system_continuous_engineering_current_copy_countUnit: noneType: deltaBase: conf/restperf/9.12.0/copy_manager.yaml ZAPI perf-object-get-instances copy_manager sce_copy_count_currUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/copy_manager.yaml"},{"location":"ontap-metrics/#copy_manager_spince_copy_count_curr","title":"copy_manager_spince_copy_count_curr","text":"

    Current number of copy requests being processed by the SpinCE.

    API Endpoint Metric Template REST api/cluster/counter/tables/copy_manager spince_current_copy_countUnit: noneType: deltaBase: conf/restperf/9.12.0/copy_manager.yaml ZAPI perf-object-get-instances copy_manager spince_copy_count_currUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/copy_manager.yaml"},{"location":"ontap-metrics/#disk_busy","title":"disk_busy","text":"

    The utilization percent of the disk

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent disk_busy_percentUnit: percentType: percentBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_busyUnit: percentType: percentBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_bytes_per_sector","title":"disk_bytes_per_sector","text":"

    Bytes per sector.

    API Endpoint Metric Template REST api/storage/disks bytes_per_sector conf/rest/9.12.0/disk.yaml ZAPI storage-disk-get-iter storage-disk-info.disk-inventory-info.bytes-per-sector conf/zapi/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_capacity","title":"disk_capacity","text":"

    Disk capacity in MB

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent capacityUnit: mbType: rawBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_capacityUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_cp_read_chain","title":"disk_cp_read_chain","text":"

    Average number of blocks transferred in each consistency point read operation during a CP

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_chainUnit: noneType: averageBase: cp_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_chainUnit: noneType: averageBase: cp_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_cp_read_latency","title":"disk_cp_read_latency","text":"

    Average latency per block in microseconds for consistency point read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_cp_reads","title":"disk_cp_reads","text":"

    Number of disk read operations initiated each second for consistency point processing

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_io_pending","title":"disk_io_pending","text":"

    Average number of I/Os issued to the disk for which we have not yet received the response

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_io_queued","title":"disk_io_queued","text":"

    Number of I/Os queued to the disk but not yet issued

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_power_on_hours","title":"disk_power_on_hours","text":"

    Hours powered on.

    API Endpoint Metric Template REST api/storage/disks stats.power_on_hours conf/rest/9.12.0/disk.yaml"},{"location":"ontap-metrics/#disk_sectors","title":"disk_sectors","text":"

    Number of sectors on the disk.

    API Endpoint Metric Template REST api/storage/disks sector_count conf/rest/9.12.0/disk.yaml ZAPI storage-disk-get-iter storage-disk-info.disk-inventory-info.capacity-sectors conf/zapi/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_stats_average_latency","title":"disk_stats_average_latency","text":"

    Average I/O latency across all active paths, in milliseconds.

    API Endpoint Metric Template REST api/storage/disks stats.average_latency conf/rest/9.12.0/disk.yaml ZAPI storage-disk-get-iter storage-disk-info.disk-stats-info.average-latency conf/zapi/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_stats_io_kbps","title":"disk_stats_io_kbps","text":"

    Total Disk Throughput in KBPS Across All Active Paths

    API Endpoint Metric Template REST api/private/cli/disk disk_io_kbps_total conf/rest/9.12.0/disk.yaml ZAPI storage-disk-get-iter storage-disk-info.disk-stats-info.disk-io-kbps conf/zapi/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_stats_sectors_read","title":"disk_stats_sectors_read","text":"

    Number of Sectors Read

    API Endpoint Metric Template REST api/private/cli/disk sectors_read conf/rest/9.12.0/disk.yaml ZAPI storage-disk-get-iter storage-disk-info.disk-stats-info.sectors-read conf/zapi/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_stats_sectors_written","title":"disk_stats_sectors_written","text":"

    Number of Sectors Written

    API Endpoint Metric Template REST api/private/cli/disk sectors_written conf/rest/9.12.0/disk.yaml ZAPI storage-disk-get-iter storage-disk-info.disk-stats-info.sectors-written conf/zapi/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_total_data","title":"disk_total_data","text":"
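    Several of the disk statistics above (disk_io_kbps_total, sectors_read, sectors_written) are collected through the private CLI passthrough rather than a public REST endpoint. A rough sketch of querying it is shown below, using the same placeholder cluster and credentials as earlier; the record key names are assumptions based on the CLI column names and may differ on your system.

    ```python
    # Sketch: read per-disk sector counters via the ONTAP private CLI passthrough.
    import requests

    CLUSTER = "cluster.example.com"   # placeholder
    AUTH = ("admin", "password")      # placeholder

    url = f"https://{CLUSTER}/api/private/cli/disk"
    params = {"fields": "sectors_read,sectors_written,disk_io_kbps_total"}
    resp = requests.get(url, params=params, auth=AUTH, verify=False)
    resp.raise_for_status()

    for rec in resp.json().get("records", []):
        # "disk" as the instance key is an assumption; adjust to your output.
        print(rec.get("disk"), rec.get("sectors_read"), rec.get("sectors_written"))
    ```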

    Total throughput for user operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_total_transfers","title":"disk_total_transfers","text":"

    Total number of disk operations involving data transfer initiated per second

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_transfer_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_transfersUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_uptime","title":"disk_uptime","text":"

    Number of seconds the drive has been powered on

    API Endpoint Metric Template REST api/storage/disks stats.power_on_hours, 60, 60 conf/rest/9.12.0/disk.yaml ZAPI storage-disk-get-iter storage-disk-info.disk-stats-info.power-on-time-interval conf/zapi/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_usable_size","title":"disk_usable_size","text":"

    Usable size of each disk, in bytes.

    API Endpoint Metric Template REST api/storage/disks usable_size conf/rest/9.12.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_read_blocks","title":"disk_user_read_blocks","text":"
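    The inventory-style disk metrics above (bytes_per_sector, sector_count, usable_size, stats.power_on_hours, stats.average_latency) all come from `api/storage/disks`, and the disk_uptime template ("stats.power_on_hours, 60, 60") suggests the reported hours are multiplied by 60×60 to expose seconds. A small sketch follows, with the endpoint and field names taken from the templates and the hostname/credentials as placeholders; the hours-to-seconds conversion is an interpretation of the template, not something this page states explicitly.

    ```python
    # Sketch: collect the disk inventory fields referenced in the templates above.
    import requests

    CLUSTER = "cluster.example.com"   # placeholder
    AUTH = ("admin", "password")      # placeholder

    fields = ("name,bytes_per_sector,sector_count,usable_size,"
              "stats.power_on_hours,stats.average_latency")
    resp = requests.get(
        f"https://{CLUSTER}/api/storage/disks",
        params={"fields": fields},
        auth=AUTH,
        verify=False,
    )
    resp.raise_for_status()

    for disk in resp.json().get("records", []):
        hours = disk.get("stats", {}).get("power_on_hours", 0)
        # disk_uptime appears to be derived as power_on_hours * 60 * 60 (seconds)
        print(disk.get("name"), hours * 60 * 60)
    ```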

    Number of blocks transferred for user read operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_read_chain","title":"disk_user_read_chain","text":"

    Average number of blocks transferred in each user read operation

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_chainUnit: noneType: averageBase: user_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_chainUnit: noneType: averageBase: user_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_read_latency","title":"disk_user_read_latency","text":"

    Average latency per block in microseconds for user read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_reads","title":"disk_user_reads","text":"

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_write_blocks","title":"disk_user_write_blocks","text":"

    Number of blocks transferred for user write operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_write_chain","title":"disk_user_write_chain","text":"

    Average number of blocks transferred in each user write operation

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_chainUnit: noneType: averageBase: user_write_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_chainUnit: noneType: averageBase: user_writes conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_write_latency","title":"disk_user_write_latency","text":"

    Average latency per block in microseconds for user write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_writes","title":"disk_user_writes","text":"

    Number of disk write operations initiated each second for storing data or metadata associated with user requests

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_writesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#environment_sensor_average_ambient_temperature","title":"environment_sensor_average_ambient_temperature","text":"
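    The disk:constituent counters above each carry a Type (rate, delta, average, percent) and, for averages, a Base counter. As a rule of thumb, a rate is the delta of the counter divided by elapsed time, and an average is the delta of the counter divided by the delta of its base. The sketch below illustrates that arithmetic for disk_user_reads (rate) and disk_user_read_latency (average over user_read_block_count) using invented sample values rather than live data.

    ```python
    # Illustration only: deriving a rate and an average-latency metric from two
    # raw counter samples, following the Type/Base annotations above.
    # The sample values are invented for the example.

    t0 = {"time": 1000.0, "user_read_count": 5_000,
          "user_read_latency": 900_000, "user_read_block_count": 40_000}
    t1 = {"time": 1010.0, "user_read_count": 6_200,
          "user_read_latency": 1_140_000, "user_read_block_count": 52_000}

    elapsed = t1["time"] - t0["time"]

    # Type: rate -> delta of the counter divided by elapsed seconds
    user_reads_per_sec = (t1["user_read_count"] - t0["user_read_count"]) / elapsed

    # Type: average, Base: user_read_block_count -> delta(counter) / delta(base)
    read_latency_us = (
        (t1["user_read_latency"] - t0["user_read_latency"])
        / (t1["user_read_block_count"] - t0["user_read_block_count"])
    )

    print(f"disk_user_reads ~ {user_reads_per_sec:.1f}/s")
    print(f"disk_user_read_latency ~ {read_latency_us:.1f} microsec per block")
    ```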

    Average temperature of all ambient sensors for node in Celsius.

    API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_average_fan_speed","title":"environment_sensor_average_fan_speed","text":"

    Average fan speed for node in rpm.

    API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_average_temperature","title":"environment_sensor_average_temperature","text":"

    Average temperature of all non-ambient sensors for node in Celsius.

    API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_max_fan_speed","title":"environment_sensor_max_fan_speed","text":"

    Maximum fan speed for node in rpm.

    API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_max_temperature","title":"environment_sensor_max_temperature","text":"

    Maximum temperature of all non-ambient sensors for node in Celsius.

    API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_min_ambient_temperature","title":"environment_sensor_min_ambient_temperature","text":"

    Minimum temperature of all ambient sensors for node in Celsius.

    API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_min_fan_speed","title":"environment_sensor_min_fan_speed","text":"

    Minimum fan speed for node in rpm.

    API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_min_temperature","title":"environment_sensor_min_temperature","text":"

    Minimum temperature of all non-ambient sensors for node in Celsius.

    API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_power","title":"environment_sensor_power","text":"

    Power consumed by a node in Watts.

    API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_threshold_value","title":"environment_sensor_threshold_value","text":"

    Provides the sensor reading.

    API Endpoint Metric Template REST api/cluster/sensors value conf/rest/9.12.0/sensor.yaml ZAPI environment-sensors-get-iter environment-sensors-info.threshold-sensor-value conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#external_service_op_num_not_found_responses","title":"external_service_op_num_not_found_responses","text":"

    Number of 'Not Found' responses for calls to this operation.

    API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op num_not_found_responsesUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#external_service_op_num_request_failures","title":"external_service_op_num_request_failures","text":"

    A cumulative count of all request failures.

    API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op num_request_failuresUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#external_service_op_num_requests_sent","title":"external_service_op_num_requests_sent","text":"

    Number of requests sent to this service.

    API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op num_requests_sentUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#external_service_op_num_responses_received","title":"external_service_op_num_responses_received","text":"

    Number of responses received from the server (does not include timeouts).

    API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op num_responses_receivedUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#external_service_op_num_successful_responses","title":"external_service_op_num_successful_responses","text":"

    Number of successful responses to this operation.

    API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op num_successful_responsesUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#external_service_op_num_timeouts","title":"external_service_op_num_timeouts","text":"

    Number of times requests to the server for this operation timed out, meaning no response was received in a given time period.

    API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op num_timeoutsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#external_service_op_request_latency","title":"external_service_op_request_latency","text":"

    Average latency of requests for operations of this type on this server.

    API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op request_latencyUnit: microsecType: averageBase: num_requests_sent conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#external_service_op_request_latency_hist","title":"external_service_op_request_latency_hist","text":"

    This histogram holds the latency values for requests of this operation to the specified server.

    API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op request_latency_histUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#fabricpool_average_latency","title":"fabricpool_average_latency","text":"

    This counter is deprecated. Average latencies measured during various phases of command execution. The execution-start latency represents the average time taken to start executing an operation. The request-prepare latency represents the average time taken to prepare the complete request that needs to be sent to the server. The send latency represents the average time taken to send requests to the server. The execution-start-to-send-complete latency represents the average time taken to send an operation out since its execution started. The execution-start-to-first-byte-received latency represents the average time taken to receive the first byte of a response since the command's request execution started. These counters can be used to identify performance bottlenecks within the object store client module.

    API Endpoint Metric Template ZAPI perf-object-get-instances object_store_client_op average_latencyUnit: Type: Base: conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml"},{"location":"ontap-metrics/#fabricpool_cloud_bin_op_latency_average","title":"fabricpool_cloud_bin_op_latency_average","text":"

    Cloud bin operation latency average in milliseconds.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_comp_aggr_vol_bin cloud_bin_op_latency_averageUnit: millisecType: rawBase: conf/restperf/9.12.0/wafl_comp_aggr_vol_bin.yaml ZAPI perf-object-get-instances wafl_comp_aggr_vol_bin cloud_bin_op_latency_averageUnit: millisecType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/wafl_comp_aggr_vol_bin.yaml"},{"location":"ontap-metrics/#fabricpool_cloud_bin_operation","title":"fabricpool_cloud_bin_operation","text":"

    Cloud bin operation counters.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_comp_aggr_vol_bin cloud_bin_opUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl_comp_aggr_vol_bin.yaml ZAPI perf-object-get-instances wafl_comp_aggr_vol_bin cloud_bin_operationUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl_comp_aggr_vol_bin.yaml"},{"location":"ontap-metrics/#fabricpool_get_throughput_bytes","title":"fabricpool_get_throughput_bytes","text":"

    This counter is deprecated. Counter that indicates the throughput for the GET command in bytes per second.

    API Endpoint Metric Template ZAPI perf-object-get-instances object_store_client_op get_throughput_bytesUnit: Type: Base: conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml"},{"location":"ontap-metrics/#fabricpool_put_throughput_bytes","title":"fabricpool_put_throughput_bytes","text":"

    This counter is deprecated. Counter that indicates the throughput for the PUT command in bytes per second.

    API Endpoint Metric Template ZAPI perf-object-get-instances object_store_client_op put_throughput_bytesUnit: Type: Base: conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml"},{"location":"ontap-metrics/#fabricpool_stats","title":"fabricpool_stats","text":"

    This counter is deprecated. Counter that indicates the number of object store operations sent, and their success and failure counts. The objstore_client_op_name array indicates the operation name, such as PUT, GET, etc. The objstore_client_op_stats_name array contains the total number of operations and the success and failure counts for each operation.

    API Endpoint Metric Template ZAPI perf-object-get-instances object_store_client_op statsUnit: Type: Base: conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml"},{"location":"ontap-metrics/#fabricpool_throughput_ops","title":"fabricpool_throughput_ops","text":"

    Counter that indicates the throughput for commands in ops per second.

    API Endpoint Metric Template ZAPI perf-object-get-instances object_store_client_op throughput_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml"},{"location":"ontap-metrics/#fcp_avg_other_latency","title":"fcp_avg_other_latency","text":"

    Average latency for operations other than read and write

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp average_other_latencyUnit: microsecType: averageBase: other_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port avg_other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_avg_read_latency","title":"fcp_avg_read_latency","text":"

    Average latency for read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp average_read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port avg_read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_avg_write_latency","title":"fcp_avg_write_latency","text":"

    Average latency for write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp average_write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port avg_write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_discarded_frames_count","title":"fcp_discarded_frames_count","text":"

    Number of discarded frames.

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp discarded_frames_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port discarded_frames_countUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_fabric_connected_speed","title":"fcp_fabric_connected_speed","text":"

    The negotiated data rate between the target FC port and the fabric in gigabits per second.

    API Endpoint Metric Template REST api/network/fc/ports fabric.connected_speed conf/rest/9.6.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_int_count","title":"fcp_int_count","text":"

    Number of interrupts

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp interrupt_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port int_countUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_invalid_crc","title":"fcp_invalid_crc","text":"

    Number of invalid cyclic redundancy checks (CRC count)

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp invalid.crcUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port invalid_crcUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_invalid_transmission_word","title":"fcp_invalid_transmission_word","text":"

    Number of invalid transmission words

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp invalid.transmission_wordUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port invalid_transmission_wordUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_isr_count","title":"fcp_isr_count","text":"

    Number of interrupt responses

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp isr.countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port isr_countUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_lif_avg_latency","title":"fcp_lif_avg_latency","text":"

    Average latency for FCP operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif average_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif avg_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_avg_other_latency","title":"fcp_lif_avg_other_latency","text":"

    Average latency for operations other than read and write

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif average_other_latencyUnit: microsecType: averageBase: other_ops conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif avg_other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_avg_read_latency","title":"fcp_lif_avg_read_latency","text":"

    Average latency for read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif average_read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif avg_read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_avg_write_latency","title":"fcp_lif_avg_write_latency","text":"

    Average latency for write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif average_write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif avg_write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_other_ops","title":"fcp_lif_other_ops","text":"

    Number of operations that are not read or write.

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_read_data","title":"fcp_lif_read_data","text":"

    Amount of data read from the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_read_ops","title":"fcp_lif_read_ops","text":"

    Number of read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_total_ops","title":"fcp_lif_total_ops","text":"

    Total number of operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_write_data","title":"fcp_lif_write_data","text":"

    Amount of data written to the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_write_ops","title":"fcp_lif_write_ops","text":"

    Number of write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_link_down","title":"fcp_link_down","text":"

    Number of times the Fibre Channel link was lost

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp link.downUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port link_downUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_link_failure","title":"fcp_link_failure","text":"

    Number of link failures

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp link_failureUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port link_failureUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_link_up","title":"fcp_link_up","text":"

    Number of times the Fibre Channel link was established

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp link.upUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port link_upUnit: noneType: deltaBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_loss_of_signal","title":"fcp_loss_of_signal","text":"

    Number of times this port lost signal

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp loss_of_signalUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port loss_of_signalUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_loss_of_sync","title":"fcp_loss_of_sync","text":"

    Number of times this port lost sync

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp loss_of_syncUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port loss_of_syncUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_max_speed","title":"fcp_max_speed","text":"

    The maximum speed supported by the FC port in gigabits per second.

    API Endpoint Metric Template REST api/network/fc/ports speed.maximum conf/rest/9.6.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_avg_other_latency","title":"fcp_nvmf_avg_other_latency","text":"
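    fcp_max_speed and fcp_fabric_connected_speed are plain REST fields on `api/network/fc/ports`. A short sketch follows, again with a placeholder cluster and credentials; the field paths are taken from the templates above, while the node/name fields used for labelling are assumptions.

    ```python
    # Sketch: list FC port speeds (Gb/s) from the api/network/fc/ports endpoint.
    import requests

    CLUSTER = "cluster.example.com"   # placeholder
    AUTH = ("admin", "password")      # placeholder

    resp = requests.get(
        f"https://{CLUSTER}/api/network/fc/ports",
        params={"fields": "name,node.name,speed.maximum,fabric.connected_speed"},
        auth=AUTH,
        verify=False,
    )
    resp.raise_for_status()

    for port in resp.json().get("records", []):
        print(
            port.get("node", {}).get("name"),
            port.get("name"),
            port.get("speed", {}).get("maximum"),
            port.get("fabric", {}).get("connected_speed"),
        )
    ```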

    Average latency for operations other than read and write (FC-NVMe)

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.average_other_latencyUnit: microsecType: averageBase: nvmf.other_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_avg_other_latencyUnit: microsecType: averageBase: nvmf_other_ops conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_avg_read_latency","title":"fcp_nvmf_avg_read_latency","text":"

    Average latency for read operations (FC-NVMe)

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.average_read_latencyUnit: microsecType: averageBase: nvmf.read_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_avg_read_latencyUnit: microsecType: averageBase: nvmf_read_ops conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_avg_remote_other_latency","title":"fcp_nvmf_avg_remote_other_latency","text":"

    Average latency for remote operations other than read and write (FC-NVMe)

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.average_remote_other_latencyUnit: microsecType: averageBase: nvmf_remote.other_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_avg_remote_other_latencyUnit: microsecType: averageBase: nvmf_remote_other_ops conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_avg_remote_read_latency","title":"fcp_nvmf_avg_remote_read_latency","text":"

    Average latency for remote read operations (FC-NVMe)

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.average_remote_read_latencyUnit: microsecType: averageBase: nvmf_remote.read_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_avg_remote_read_latencyUnit: microsecType: averageBase: nvmf_remote_read_ops conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_avg_remote_write_latency","title":"fcp_nvmf_avg_remote_write_latency","text":"

    Average latency for remote write operations (FC-NVMe)

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.average_remote_write_latencyUnit: microsecType: averageBase: nvmf_remote.write_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_avg_remote_write_latencyUnit: microsecType: averageBase: nvmf_remote_write_ops conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_avg_write_latency","title":"fcp_nvmf_avg_write_latency","text":"

    Average latency for write operations (FC-NVMe)

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.average_write_latencyUnit: microsecType: averageBase: nvmf.write_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_avg_write_latencyUnit: microsecType: averageBase: nvmf_write_ops conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_caw_data","title":"fcp_nvmf_caw_data","text":"

    Amount of CAW data sent to the storage system (FC-NVMe)

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.caw_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_caw_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_caw_ops","title":"fcp_nvmf_caw_ops","text":"

    Number of FC-NVMe CAW operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.caw_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_caw_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_command_slots","title":"fcp_nvmf_command_slots","text":"

    Number of command slots that have been used by initiators logging into this port. This shows the command fan-in on the port.

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.command_slotsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_command_slotsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_other_ops","title":"fcp_nvmf_other_ops","text":"

    Number of NVMF operations that are not read or write.

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_read_data","title":"fcp_nvmf_read_data","text":"

    Amount of data read from the storage system (FC-NVMe)

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_read_ops","title":"fcp_nvmf_read_ops","text":"

    Number of FC-NVMe read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_caw_data","title":"fcp_nvmf_remote_caw_data","text":"

    Amount of remote CAW data sent to the storage system (FC-NVMe)

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.caw_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_caw_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_caw_ops","title":"fcp_nvmf_remote_caw_ops","text":"

    Number of FC-NVMe remote CAW operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.caw_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_caw_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_other_ops","title":"fcp_nvmf_remote_other_ops","text":"

    Number of NVMF remote operations that are not read or write.

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_read_data","title":"fcp_nvmf_remote_read_data","text":"

    Amount of remote data read from the storage system (FC-NVMe)

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_read_ops","title":"fcp_nvmf_remote_read_ops","text":"

    Number of FC-NVMe remote read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_total_data","title":"fcp_nvmf_remote_total_data","text":"

    Amount of remote FC-NVMe traffic to and from the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_total_ops","title":"fcp_nvmf_remote_total_ops","text":"

    Total number of remote FC-NVMe operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_write_data","title":"fcp_nvmf_remote_write_data","text":"

    Amount of remote data written to the storage system (FC-NVMe)

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_write_ops","title":"fcp_nvmf_remote_write_ops","text":"

    Number of FC-NVMe remote write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_total_data","title":"fcp_nvmf_total_data","text":"

    Amount of FC-NVMe traffic to and from the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_total_ops","title":"fcp_nvmf_total_ops","text":"

    Total number of FC-NVMe operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_write_data","title":"fcp_nvmf_write_data","text":"

    Amount of data written to the storage system (FC-NVMe)

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_write_ops","title":"fcp_nvmf_write_ops","text":"

    Number of FC-NVMe write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_other_ops","title":"fcp_other_ops","text":"

    Number of operations that are not read or write.

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_prim_seq_err","title":"fcp_prim_seq_err","text":"

    Number of primitive sequence errors

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp primitive_seq_errUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port prim_seq_errUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_queue_full","title":"fcp_queue_full","text":"

    Number of times a queue full condition occurred.

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp queue_fullUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port queue_fullUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_read_data","title":"fcp_read_data","text":"

    Amount of data read from the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_read_ops","title":"fcp_read_ops","text":"

    Number of read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_reset_count","title":"fcp_reset_count","text":"

    Number of physical port resets

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp reset_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port reset_countUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_shared_int_count","title":"fcp_shared_int_count","text":"

    Number of shared interrupts

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp shared_interrupt_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port shared_int_countUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_spurious_int_count","title":"fcp_spurious_int_count","text":"

    Number of spurious interrupts

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp spurious_interrupt_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port spurious_int_countUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_threshold_full","title":"fcp_threshold_full","text":"

    Number of times the total number of outstanding commands on the port exceeded the threshold supported by this port.

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp threshold_fullUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port threshold_fullUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_total_data","title":"fcp_total_data","text":"

    Amount of FCP traffic to and from the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_total_ops","title":"fcp_total_ops","text":"

    Total number of FCP operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_write_data","title":"fcp_write_data","text":"

    Amount of data written to the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_write_ops","title":"fcp_write_ops","text":"

    Number of write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/fcp write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcvi_firmware_invalid_crc_count","title":"fcvi_firmware_invalid_crc_count","text":"

    Firmware reported invalid CRC count

    API Endpoint Metric Template REST api/cluster/counter/tables/fcvi firmware.invalid_crc_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi fw_invalid_crcUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_firmware_invalid_transmit_word_count","title":"fcvi_firmware_invalid_transmit_word_count","text":"

    Firmware reported invalid transmit word count

    API Endpoint Metric Template REST api/cluster/counter/tables/fcvi firmware.invalid_transmit_word_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi fw_invalid_xmit_wordsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_firmware_link_failure_count","title":"fcvi_firmware_link_failure_count","text":"

    Firmware reported link failure count

    API Endpoint Metric Template REST api/cluster/counter/tables/fcvi firmware.link_failure_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi fw_link_failureUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_firmware_loss_of_signal_count","title":"fcvi_firmware_loss_of_signal_count","text":"

    Firmware reported loss of signal count

    API Endpoint Metric Template REST api/cluster/counter/tables/fcvi firmware.loss_of_signal_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi fw_loss_of_signalUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_firmware_loss_of_sync_count","title":"fcvi_firmware_loss_of_sync_count","text":"

    Firmware reported loss of sync count

    API Endpoint Metric Template REST api/cluster/counter/tables/fcvi firmware.loss_of_sync_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi fw_loss_of_syncUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_firmware_systat_discard_frames","title":"fcvi_firmware_systat_discard_frames","text":"

    Firmware reported SyStatDiscardFrames value

    API Endpoint Metric Template REST api/cluster/counter/tables/fcvi firmware.systat.discard_framesUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi fw_SyStatDiscardFramesUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_hard_reset_count","title":"fcvi_hard_reset_count","text":"

    Number of times a hard reset of the FCVI adapter was issued.

    API Endpoint Metric Template REST api/cluster/counter/tables/fcvi hard_reset_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi hard_reset_cntUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_rdma_write_avg_latency","title":"fcvi_rdma_write_avg_latency","text":"

    Average RDMA write I/O latency.

    API Endpoint Metric Template REST api/cluster/counter/tables/fcvi rdma.write_average_latencyUnit: microsecType: averageBase: rdma.write_ops conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi rdma_write_avg_latencyUnit: microsecType: averageBase: rdma_write_ops conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_rdma_write_ops","title":"fcvi_rdma_write_ops","text":"

    Number of RDMA write I/Os issued per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/fcvi rdma.write_opsUnit: noneType: rateBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi rdma_write_opsUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_rdma_write_throughput","title":"fcvi_rdma_write_throughput","text":"

    RDMA write throughput in bytes per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/fcvi rdma.write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi rdma_write_throughputUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_soft_reset_count","title":"fcvi_soft_reset_count","text":"

    Number of times a soft reset of the FCVI adapter was issued.

    API Endpoint Metric Template REST api/cluster/counter/tables/fcvi soft_reset_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi soft_reset_cntUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#flashcache_accesses","title":"flashcache_accesses","text":"

    External cache accesses per second

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache accessesUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj accessesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_disk_reads_replaced","title":"flashcache_disk_reads_replaced","text":"

    Estimated number of disk reads per second replaced by cache

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache disk_reads_replacedUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj disk_reads_replacedUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_evicts","title":"flashcache_evicts","text":"

    Number of blocks evicted from the external cache to make room for new blocks

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache evictsUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj evictsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_hit","title":"flashcache_hit","text":"

    Number of WAFL buffers served off the external cache

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache hit.totalUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj hitUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_hit_directory","title":"flashcache_hit_directory","text":"

    Number of directory buffers served off the external cache

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache hit.directoryUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj hit_directoryUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_hit_indirect","title":"flashcache_hit_indirect","text":"

    Number of indirect file buffers served off the external cache

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache hit.indirectUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj hit_indirectUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_hit_metadata_file","title":"flashcache_hit_metadata_file","text":"

    Number of metadata file buffers served off the external cache

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache hit.metadata_fileUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj hit_metadata_fileUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_hit_normal_lev0","title":"flashcache_hit_normal_lev0","text":"

    Number of normal level 0 WAFL buffers served off the external cache

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache hit.normal_level_zeroUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj hit_normal_lev0Unit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_hit_percent","title":"flashcache_hit_percent","text":"

    External cache hit rate

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache hit.percentUnit: percentType: averageBase: accesses conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj hit_percentUnit: percentType: percentBase: accesses conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_inserts","title":"flashcache_inserts","text":"
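    flashcache_hit_percent is a percent-style counter with accesses as its base, so per-interval values are derived from deltas of both counters rather than read directly. A tiny illustration with invented sample values:

    ```python
    # Illustration only: hit percent = delta(hit.total) / delta(accesses) * 100.
    # Sample counter values are invented.
    prev = {"hit_total": 120_000, "accesses": 150_000}
    curr = {"hit_total": 150_000, "accesses": 190_000}

    d_hit = curr["hit_total"] - prev["hit_total"]
    d_acc = curr["accesses"] - prev["accesses"]

    hit_percent = 100.0 * d_hit / d_acc if d_acc else 0.0
    print(f"flashcache_hit_percent ~ {hit_percent:.1f}%")
    ```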

    Number of WAFL buffers inserted into the external cache

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache insertsUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj insertsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_invalidates","title":"flashcache_invalidates","text":"

    Number of blocks invalidated in the external cache

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache invalidatesUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj invalidatesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_miss","title":"flashcache_miss","text":"

    External cache misses

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache miss.totalUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj missUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_miss_directory","title":"flashcache_miss_directory","text":"

    External cache misses accessing directory buffers

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache miss.directoryUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj miss_directoryUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_miss_indirect","title":"flashcache_miss_indirect","text":"

    External cache misses accessing indirect file buffers

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache miss.indirectUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj miss_indirectUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_miss_metadata_file","title":"flashcache_miss_metadata_file","text":"

    External cache misses accessing metadata file buffers

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache miss.metadata_fileUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj miss_metadata_fileUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_miss_normal_lev0","title":"flashcache_miss_normal_lev0","text":"

    External cache misses accessing normal level 0 buffers

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache miss.normal_level_zeroUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj miss_normal_lev0Unit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_usage","title":"flashcache_usage","text":"

    Percentage of blocks in external cache currently containing valid data

    API Endpoint Metric Template REST api/cluster/counter/tables/external_cache usageUnit: percentType: rawBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj usageUnit: percentType: rawBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashpool_cache_stats","title":"flashpool_cache_stats","text":"

    Automated Working-set Analyzer (AWA) per-interval pseudo cache statistics for the most recent intervals. The number of intervals defined as recent is CM_WAFL_HYAS_INT_DIS_CNT. This array is a table with fields corresponding to the enum type of hyas_cache_stat_type_t.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_sizer cache_statsUnit: noneType: rawBase: conf/restperf/9.12.0/wafl_hya_sizer.yaml ZAPI perf-object-get-instances wafl_hya_sizer cache_statsUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/wafl_hya_sizer.yaml"},{"location":"ontap-metrics/#flashpool_evict_destage_rate","title":"flashpool_evict_destage_rate","text":"

    Number of block destages per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate evict_destage_rateUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr evict_destage_rateUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_evict_remove_rate","title":"flashpool_evict_remove_rate","text":"

    Number of blocks freed per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate evict_remove_rateUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr evict_remove_rateUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_hya_read_hit_latency_average","title":"flashpool_hya_read_hit_latency_average","text":"

    Average of RAID I/O latency on read hit.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate hya_read_hit_latency_averageUnit: millisecType: averageBase: hya_read_hit_latency_count conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr hya_read_hit_latency_averageUnit: millisecType: averageBase: hya_read_hit_latency_count conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_hya_read_miss_latency_average","title":"flashpool_hya_read_miss_latency_average","text":"

    Average read miss latency.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate hya_read_miss_latency_averageUnit: millisecType: averageBase: hya_read_miss_latency_count conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr hya_read_miss_latency_averageUnit: millisecType: averageBase: hya_read_miss_latency_count conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_hya_write_hdd_latency_average","title":"flashpool_hya_write_hdd_latency_average","text":"

    Average write latency to HDD.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate hya_write_hdd_latency_averageUnit: millisecType: averageBase: hya_write_hdd_latency_count conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr hya_write_hdd_latency_averageUnit: millisecType: averageBase: hya_write_hdd_latency_count conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_hya_write_ssd_latency_average","title":"flashpool_hya_write_ssd_latency_average","text":"

    Average of RAID I/O latency on write to SSD.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate hya_write_ssd_latency_averageUnit: millisecType: averageBase: hya_write_ssd_latency_count conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr hya_write_ssd_latency_averageUnit: millisecType: averageBase: hya_write_ssd_latency_count conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_read_cache_ins_rate","title":"flashpool_read_cache_ins_rate","text":"

    Cache insert rate in blocks/sec.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate read_cache_insert_rateUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr read_cache_ins_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_read_ops_replaced","title":"flashpool_read_ops_replaced","text":"

    Number of HDD read operations replaced by SSD reads per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate read_ops_replacedUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr read_ops_replacedUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_read_ops_replaced_percent","title":"flashpool_read_ops_replaced_percent","text":"

    Percentage of HDD read operations replaced by SSD.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate read_ops_replaced_percentUnit: percentType: percentBase: read_ops_total conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr read_ops_replaced_percentUnit: percentType: percentBase: read_ops_total conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_ssd_available","title":"flashpool_ssd_available","text":"

    Total SSD blocks available.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate ssd_availableUnit: noneType: rawBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr ssd_availableUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_ssd_read_cached","title":"flashpool_ssd_read_cached","text":"

    Total read cached SSD blocks.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate ssd_read_cachedUnit: noneType: rawBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr ssd_read_cachedUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_ssd_total","title":"flashpool_ssd_total","text":"

    Total SSD blocks.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate ssd_totalUnit: noneType: rawBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr ssd_totalUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_ssd_total_used","title":"flashpool_ssd_total_used","text":"

    Total SSD blocks used.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate ssd_total_usedUnit: noneType: rawBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr ssd_total_usedUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_ssd_write_cached","title":"flashpool_ssd_write_cached","text":"

    Total write cached SSD blocks.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate ssd_write_cachedUnit: noneType: rawBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr ssd_write_cachedUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_wc_write_blks_total","title":"flashpool_wc_write_blks_total","text":"

    Number of write-cache blocks written per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate wc_write_blocks_totalUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr wc_write_blks_totalUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_write_blks_replaced","title":"flashpool_write_blks_replaced","text":"

    Number of HDD write blocks replaced by SSD writes per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate write_blocks_replacedUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr write_blks_replacedUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_write_blks_replaced_percent","title":"flashpool_write_blks_replaced_percent","text":"

    Percentage of blocks overwritten to write-cache among all disk writes.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate write_blocks_replaced_percentUnit: percentType: averageBase: estimated_write_blocks_total conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr write_blks_replaced_percentUnit: percentType: averageBase: est_write_blks_total conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flexcache_blocks_requested_from_client","title":"flexcache_blocks_requested_from_client","text":"

    Total number of blocks requested from client

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume blocks_requested_from_clientUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_blocks_retrieved_from_origin","title":"flexcache_blocks_retrieved_from_origin","text":"

    Total number of blocks retrieved from origin

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume blocks_retrieved_from_originUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_evict_rw_cache_skipped_reason_disconnected","title":"flexcache_evict_rw_cache_skipped_reason_disconnected","text":"

    Total number of read-write cache evict operations skipped because cache is disconnected.

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume evict_rw_cache_skipped_reason_disconnectedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_evict_skipped_reason_config_noent","title":"flexcache_evict_skipped_reason_config_noent","text":"

    Total number of evict operations skipped because the cache config is not available.

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume evict_skipped_reason_config_noentUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_evict_skipped_reason_disconnected","title":"flexcache_evict_skipped_reason_disconnected","text":"

    Total number of evict operations skipped because the cache is disconnected.

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume evict_skipped_reason_disconnectedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_evict_skipped_reason_offline","title":"flexcache_evict_skipped_reason_offline","text":"

    Total number of evict operations skipped because the cache volume is offline.

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume evict_skipped_reason_offlineUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_invalidate_skipped_reason_config_noent","title":"flexcache_invalidate_skipped_reason_config_noent","text":"

    Total number of invalidate operations skipped because the cache config is not available.

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume invalidate_skipped_reason_config_noentUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_invalidate_skipped_reason_disconnected","title":"flexcache_invalidate_skipped_reason_disconnected","text":"

    Total number of invalidate operations skipped because the cache is disconnected.

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume invalidate_skipped_reason_disconnectedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_invalidate_skipped_reason_offline","title":"flexcache_invalidate_skipped_reason_offline","text":"

    Total number of invalidate operations skipped because the cache volume is offline.

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume invalidate_skipped_reason_offlineUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_miss_percent","title":"flexcache_miss_percent","text":"

    This metric represents the percentage of block requests from a client that resulted in a \"miss\" in the FlexCache. A \"miss\" occurs when the requested data is not found in the cache and has to be retrieved from the origin volume.

    API Endpoint Metric Template ZAPI flexcache_per_volume blocks_retrieved_from_origin, blocks_requested_from_clientUnit: Type: Base: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_nix_retry_skipped_reason_initiator_retrieve","title":"flexcache_nix_retry_skipped_reason_initiator_retrieve","text":"
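    A minimal sketch of the calculation described above, assuming the miss percentage is the share of client-requested blocks that had to be fetched from the origin (the helper function and sample values are illustrative, not Harvest code):

```python
# Field names mirror the flexcache_per_volume counters referenced above;
# the helper itself is hypothetical.

def flexcache_miss_percent(blocks_retrieved_from_origin: int,
                           blocks_requested_from_client: int) -> float:
    """Share of client-requested blocks that were fetched from the origin volume."""
    if blocks_requested_from_client == 0:
        return 0.0  # no client requests in the interval
    return 100.0 * blocks_retrieved_from_origin / blocks_requested_from_client

# 1,500 of 60,000 requested blocks came from the origin -> 2.5 (% miss rate)
print(flexcache_miss_percent(1_500, 60_000))
```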

    Total retry nix operations skipped because the initiator is a retrieve operation.

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume nix_retry_skipped_reason_initiator_retrieveUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_nix_skipped_reason_config_noent","title":"flexcache_nix_skipped_reason_config_noent","text":"

    Total number of nix operations skipped because the cache config is not available.

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume nix_skipped_reason_config_noentUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_nix_skipped_reason_disconnected","title":"flexcache_nix_skipped_reason_disconnected","text":"

    Total number of nix operations skipped because the cache is disconnected.

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume nix_skipped_reason_disconnectedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_nix_skipped_reason_in_progress","title":"flexcache_nix_skipped_reason_in_progress","text":"

    Total nix operations skipped because of an in-progress nix.

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume nix_skipped_reason_in_progressUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_nix_skipped_reason_offline","title":"flexcache_nix_skipped_reason_offline","text":"

    Total number of nix operations skipped because the cache volume is offline.

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume nix_skipped_reason_offlineUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_reconciled_data_entries","title":"flexcache_reconciled_data_entries","text":"

    Total number of reconciled data entries at cache side.

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume reconciled_data_entriesUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_reconciled_lock_entries","title":"flexcache_reconciled_lock_entries","text":"

    Total number of reconciled lock entries at cache side.

    API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume reconciled_lock_entriesUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_size","title":"flexcache_size","text":"

    Physical size of the FlexCache. The recommended size for a FlexCache is 10% of the origin volume. The minimum FlexCache constituent size is 1GB.

    API Endpoint Metric Template REST api/storage/flexcache/flexcaches size conf/rest/9.12.0/flexcache.yaml ZAPI flexcache-get-iter flexcache-info.size conf/zapi/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#headroom_aggr_current_latency","title":"headroom_aggr_current_latency","text":"

    This is the storage aggregate average latency per message at the disk level.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate current_latencyUnit: microsecType: averageBase: current_ops conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr current_latencyUnit: microsecType: averageBase: current_ops conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_current_ops","title":"headroom_aggr_current_ops","text":"

    Total number of I/Os processed by the aggregate per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate current_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr current_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_current_utilization","title":"headroom_aggr_current_utilization","text":"

    This is the storage aggregate average utilization of all the data disks in the aggregate.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate current_utilizationUnit: percentType: percentBase: current_utilization_denominator conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr current_utilizationUnit: percentType: percentBase: current_utilization_total conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_ewma_daily","title":"headroom_aggr_ewma_daily","text":"

    Daily exponential weighted moving average.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate ewma.dailyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr ewma_dailyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_ewma_hourly","title":"headroom_aggr_ewma_hourly","text":"

    Hourly exponential weighted moving average.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate ewma.hourlyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr ewma_hourlyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_ewma_monthly","title":"headroom_aggr_ewma_monthly","text":"

    Monthly exponential weighted moving average.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate ewma.monthlyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr ewma_monthlyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_ewma_weekly","title":"headroom_aggr_ewma_weekly","text":"

    Weekly exponential weighted moving average.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate ewma.weeklyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr ewma_weeklyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_optimal_point_confidence_factor","title":"headroom_aggr_optimal_point_confidence_factor","text":"

    The confidence factor for the optimal point value based on the observed resource latency and utilization.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate optimal_point.confidence_factorUnit: noneType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr optimal_point_confidence_factorUnit: noneType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_optimal_point_latency","title":"headroom_aggr_optimal_point_latency","text":"

    The latency component of the optimal point of the latency/utilization curve.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate optimal_point.latencyUnit: microsecType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr optimal_point_latencyUnit: microsecType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_optimal_point_ops","title":"headroom_aggr_optimal_point_ops","text":"

    The ops component of the optimal point derived from the latency/utilization curve.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate optimal_point.opsUnit: per_secType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr optimal_point_opsUnit: per_secType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_optimal_point_utilization","title":"headroom_aggr_optimal_point_utilization","text":"

    The utilization component of the optimal point of the latency/utilization curve.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate optimal_point.utilizationUnit: noneType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr optimal_point_utilizationUnit: noneType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_cpu_current_latency","title":"headroom_cpu_current_latency","text":"

    Current operation latency of the resource.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu current_latencyUnit: microsecType: averageBase: current_ops conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu current_latencyUnit: microsecType: averageBase: current_ops conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_current_ops","title":"headroom_cpu_current_ops","text":"

    Total number of operations per second (also referred to as dblade ops).

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu current_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu current_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_current_utilization","title":"headroom_cpu_current_utilization","text":"

    Average processor utilization across all processors in the system.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu current_utilizationUnit: percentType: percentBase: elapsed_time conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu current_utilizationUnit: percentType: percentBase: current_utilization_total conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_ewma_daily","title":"headroom_cpu_ewma_daily","text":"

    Daily exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu ewma.dailyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu ewma_dailyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_ewma_hourly","title":"headroom_cpu_ewma_hourly","text":"

    Hourly exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu ewma.hourlyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu ewma_hourlyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_ewma_monthly","title":"headroom_cpu_ewma_monthly","text":"

    Monthly exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu ewma.monthlyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu ewma_monthlyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_ewma_weekly","title":"headroom_cpu_ewma_weekly","text":"

    Weekly exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu ewma.weeklyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu ewma_weeklyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_optimal_point_confidence_factor","title":"headroom_cpu_optimal_point_confidence_factor","text":"

    Confidence factor for the optimal point value based on the observed resource latency and utilization. The possible values are: 0 - unknown, 1 - low, 2 - medium, 3 - high. This counter can provide an average confidence factor over a range of time.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu optimal_point.confidence_factorUnit: noneType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu optimal_point_confidence_factorUnit: noneType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_optimal_point_latency","title":"headroom_cpu_optimal_point_latency","text":"

    Latency component of the optimal point of the latency/utilization curve. This counter can provide an average latency over a range of time.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu optimal_point.latencyUnit: microsecType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu optimal_point_latencyUnit: microsecType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_optimal_point_ops","title":"headroom_cpu_optimal_point_ops","text":"

    Ops component of the optimal point derived from the latency/utilization curve. This counter can provide an average ops over a range of time.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu optimal_point.opsUnit: per_secType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu optimal_point_opsUnit: per_secType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_optimal_point_utilization","title":"headroom_cpu_optimal_point_utilization","text":"

    Utilization component of the optimal point of the latency/utilization curve. This counter can provide an average utilization over a range of time.

    API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu optimal_point.utilizationUnit: noneType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu optimal_point_utilizationUnit: noneType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#hostadapter_bytes_read","title":"hostadapter_bytes_read","text":"

    Bytes read through a host adapter

    API Endpoint Metric Template REST api/cluster/counter/tables/host_adapter bytes_readUnit: per_secType: rateBase: conf/restperf/9.12.0/hostadapter.yaml ZAPI perf-object-get-instances hostadapter bytes_readUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/hostadapter.yaml"},{"location":"ontap-metrics/#hostadapter_bytes_written","title":"hostadapter_bytes_written","text":"

    Bytes written through a host adapter

    API Endpoint Metric Template REST api/cluster/counter/tables/host_adapter bytes_writtenUnit: per_secType: rateBase: conf/restperf/9.12.0/hostadapter.yaml ZAPI perf-object-get-instances hostadapter bytes_writtenUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/hostadapter.yaml"},{"location":"ontap-metrics/#iscsi_lif_avg_latency","title":"iscsi_lif_avg_latency","text":"

    Average latency for iSCSI operations

    API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif average_latencyUnit: microsecType: averageBase: cmd_transferred conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif avg_latencyUnit: microsecType: averageBase: cmd_transfered conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_avg_other_latency","title":"iscsi_lif_avg_other_latency","text":"

    Average latency for operations other than read and write (for example, Inquiry, Report LUNs, SCSI Task Management Functions)

    API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif average_other_latencyUnit: microsecType: averageBase: iscsi_other_ops conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif avg_other_latencyUnit: microsecType: averageBase: iscsi_other_ops conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_avg_read_latency","title":"iscsi_lif_avg_read_latency","text":"

    Average latency for read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif average_read_latencyUnit: microsecType: averageBase: iscsi_read_ops conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif avg_read_latencyUnit: microsecType: averageBase: iscsi_read_ops conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_avg_write_latency","title":"iscsi_lif_avg_write_latency","text":"

    Average latency for write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif average_write_latencyUnit: microsecType: averageBase: iscsi_write_ops conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif avg_write_latencyUnit: microsecType: averageBase: iscsi_write_ops conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_cmd_transfered","title":"iscsi_lif_cmd_transfered","text":"

    Commands transferred by this iSCSI connection

    API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif cmd_transferredUnit: noneType: rateBase: conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif cmd_transferedUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_iscsi_other_ops","title":"iscsi_lif_iscsi_other_ops","text":"

    iSCSI other operations per second on this logical interface (LIF)

    API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif iscsi_other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif iscsi_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_iscsi_read_ops","title":"iscsi_lif_iscsi_read_ops","text":"

    iSCSI read operations per second on this logical interface (LIF)

    API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif iscsi_read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif iscsi_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_iscsi_write_ops","title":"iscsi_lif_iscsi_write_ops","text":"

    iSCSI write operations per second on this logical interface (LIF)

    API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif iscsi_write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif iscsi_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_protocol_errors","title":"iscsi_lif_protocol_errors","text":"

    Number of protocol errors from iSCSI sessions on this logical interface (LIF)

    API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif protocol_errorsUnit: noneType: deltaBase: conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif protocol_errorsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_read_data","title":"iscsi_lif_read_data","text":"

    Amount of data read from the storage system in bytes

    API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_write_data","title":"iscsi_lif_write_data","text":"

    Amount of data written to the storage system in bytes

    API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iw_avg_latency","title":"iw_avg_latency","text":"

    Average RDMA I/O latency.

    API Endpoint Metric Template REST api/cluster/counter/tables/iwarp average_latencyUnit: microsecType: averageBase: ops conf/restperf/9.14.1/iwarp.yaml ZAPI perf-object-get-instances iwarp iw_avg_latencyUnit: microsecType: averageBase: iw_ops conf/zapiperf/cdot/9.8.0/iwarp.yaml"},{"location":"ontap-metrics/#iw_ops","title":"iw_ops","text":"

    Number of RDMA I/Os issued.

    API Endpoint Metric Template REST api/cluster/counter/tables/iwarp opsUnit: noneType: rateBase: conf/restperf/9.14.1/iwarp.yaml ZAPI perf-object-get-instances iwarp iw_opsUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/iwarp.yaml"},{"location":"ontap-metrics/#iw_read_ops","title":"iw_read_ops","text":"

    Number of RDMA read I/Os issued.

    API Endpoint Metric Template REST api/cluster/counter/tables/iwarp read_opsUnit: noneType: rateBase: conf/restperf/9.14.1/iwarp.yaml ZAPI perf-object-get-instances iwarp iw_read_opsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/iwarp.yaml"},{"location":"ontap-metrics/#iw_write_ops","title":"iw_write_ops","text":"

    Number of RDMA write I/Os issued.

    API Endpoint Metric Template REST api/cluster/counter/tables/iwarp write_opsUnit: noneType: rateBase: conf/restperf/9.14.1/iwarp.yaml ZAPI perf-object-get-instances iwarp iw_write_opsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/iwarp.yaml"},{"location":"ontap-metrics/#lif_recv_data","title":"lif_recv_data","text":"

    Number of bytes received per second

    API Endpoint Metric Template REST api/cluster/counter/tables/lif received_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/lif.yaml ZAPI perf-object-get-instances lif recv_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lif.yaml"},{"location":"ontap-metrics/#lif_recv_errors","title":"lif_recv_errors","text":"

    Number of received errors per second

    API Endpoint Metric Template REST api/cluster/counter/tables/lif received_errorsUnit: per_secType: rateBase: conf/restperf/9.12.0/lif.yaml ZAPI perf-object-get-instances lif recv_errorsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lif.yaml"},{"location":"ontap-metrics/#lif_recv_packet","title":"lif_recv_packet","text":"

    Number of packets received per second

    API Endpoint Metric Template REST api/cluster/counter/tables/lif received_packetsUnit: per_secType: rateBase: conf/restperf/9.12.0/lif.yaml ZAPI perf-object-get-instances lif recv_packetUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lif.yaml"},{"location":"ontap-metrics/#lif_sent_data","title":"lif_sent_data","text":"

    Number of bytes sent per second

    API Endpoint Metric Template REST api/cluster/counter/tables/lif sent_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/lif.yaml ZAPI perf-object-get-instances lif sent_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lif.yaml"},{"location":"ontap-metrics/#lif_sent_errors","title":"lif_sent_errors","text":"

    Number of sent errors per second

    API Endpoint Metric Template REST api/cluster/counter/tables/lif sent_errorsUnit: per_secType: rateBase: conf/restperf/9.12.0/lif.yaml ZAPI perf-object-get-instances lif sent_errorsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lif.yaml"},{"location":"ontap-metrics/#lif_sent_packet","title":"lif_sent_packet","text":"

    Number of packets sent per second

    API Endpoint Metric Template REST api/cluster/counter/tables/lif sent_packetsUnit: per_secType: rateBase: conf/restperf/9.12.0/lif.yaml ZAPI perf-object-get-instances lif sent_packetUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lif.yaml"},{"location":"ontap-metrics/#lun_avg_read_latency","title":"lun_avg_read_latency","text":"

    Average read latency in microseconds for all operations on the LUN

    API Endpoint Metric Template REST api/cluster/counter/tables/lun average_read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun avg_read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_avg_write_latency","title":"lun_avg_write_latency","text":"

    Average write latency in microseconds for all operations on the LUN

    API Endpoint Metric Template REST api/cluster/counter/tables/lun average_write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun avg_write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_avg_xcopy_latency","title":"lun_avg_xcopy_latency","text":"

    Average latency in microseconds for xcopy requests

    API Endpoint Metric Template REST api/cluster/counter/tables/lun average_xcopy_latencyUnit: microsecType: averageBase: xcopy_requests conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun avg_xcopy_latencyUnit: microsecType: averageBase: xcopy_reqs conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_caw_reqs","title":"lun_caw_reqs","text":"

    Number of compare and write requests

    API Endpoint Metric Template REST api/cluster/counter/tables/lun caw_requestsUnit: noneType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun caw_reqsUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_enospc","title":"lun_enospc","text":"

    Number of operations receiving ENOSPC errors

    API Endpoint Metric Template REST api/cluster/counter/tables/lun enospcUnit: noneType: deltaBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun enospcUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_queue_full","title":"lun_queue_full","text":"

    Queue full responses

    API Endpoint Metric Template REST api/cluster/counter/tables/lun queue_fullUnit: per_secType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun queue_fullUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_read_align_histo","title":"lun_read_align_histo","text":"

    Histogram of WAFL read alignment (number of sectors off WAFL block start)

    API Endpoint Metric Template REST api/cluster/counter/tables/lun read_align_histogramUnit: percentType: percentBase: read_ops_sent conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun read_align_histoUnit: percentType: percentBase: read_ops_sent conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_read_data","title":"lun_read_data","text":"

    Read bytes

    API Endpoint Metric Template REST api/cluster/counter/tables/lun read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_read_ops","title":"lun_read_ops","text":"

    Number of read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/lun read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_read_partial_blocks","title":"lun_read_partial_blocks","text":"

    Percentage of reads whose size is not a multiple of WAFL block size

    API Endpoint Metric Template REST api/cluster/counter/tables/lun read_partial_blocksUnit: percentType: percentBase: read_ops conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun read_partial_blocksUnit: percentType: percentBase: read_ops conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_remote_bytes","title":"lun_remote_bytes","text":"

    I/O to or from a LUN which is not owned by the storage system handling the I/O.

    API Endpoint Metric Template REST api/cluster/counter/tables/lun remote_bytesUnit: b_per_secType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun remote_bytesUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_remote_ops","title":"lun_remote_ops","text":"

    Number of operations received by a storage system that does not own the LUN targeted by the operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/lun remote_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun remote_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_size","title":"lun_size","text":"

    The total provisioned size of the LUN. The LUN size can be increased but not made smaller using the REST interface. The maximum and minimum sizes listed here are the absolute maximum and absolute minimum sizes in bytes. The actual minimum and maximum sizes vary depending on the ONTAP version, ONTAP platform, and the available space in the containing volume and aggregate. For more information, see Size properties in the docs section of the ONTAP REST API documentation.

    API Endpoint Metric Template REST api/storage/luns space.size conf/rest/9.12.0/lun.yaml ZAPI lun-get-iter lun-info.size conf/zapi/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_size_used","title":"lun_size_used","text":"

    The amount of space consumed by the main data stream of the LUN. This value is the total space consumed in the volume by the LUN, including filesystem overhead, but excluding prefix and suffix streams. Due to internal filesystem overhead and the many ways SAN filesystems and applications utilize blocks within a LUN, this value does not necessarily reflect actual consumption/availability from the perspective of the filesystem or application. Without specific knowledge of how the LUN blocks are utilized outside of ONTAP, this property should not be used as an indicator for an out-of-space condition. For more information, see Size properties in the docs section of the ONTAP REST API documentation.

    API Endpoint Metric Template REST api/storage/luns space.used conf/rest/9.12.0/lun.yaml ZAPI lun-get-iter lun-info.size-used conf/zapi/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_size_used_percent","title":"lun_size_used_percent","text":"

    This metric represents the percentage of a LUN that is currently being used.

    API Endpoint Metric Template REST api/storage/luns size_used, size conf/rest/9.12.0/lun.yaml ZAPI lun-get-iter size_used, size conf/zapi/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_unmap_reqs","title":"lun_unmap_reqs","text":"
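    A minimal sketch of how this percentage is derived from the two underlying values, lun_size_used and lun_size (the helper function and sample values are illustrative, not Harvest code):

```python
# Inputs correspond to the size_used and size fields listed above;
# the function name and example numbers are hypothetical.

def lun_size_used_percent(size_used_bytes: int, size_bytes: int) -> float:
    """Consumed space as a percentage of the LUN's provisioned size."""
    if size_bytes == 0:
        return 0.0
    return 100.0 * size_used_bytes / size_bytes

# A 500 GiB LUN with 200 GiB consumed reports 40.0
print(lun_size_used_percent(200 * 2**30, 500 * 2**30))
```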

    Number of unmap command requests

    API Endpoint Metric Template REST api/cluster/counter/tables/lun unmap_requestsUnit: noneType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun unmap_reqsUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_write_align_histo","title":"lun_write_align_histo","text":"

    Histogram of WAFL write alignment (number of sectors off WAFL block start)

    API Endpoint Metric Template REST api/cluster/counter/tables/lun write_align_histogramUnit: percentType: percentBase: write_ops_sent conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun write_align_histoUnit: percentType: percentBase: write_ops_sent conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_write_data","title":"lun_write_data","text":"

    Write bytes

    API Endpoint Metric Template REST api/cluster/counter/tables/lun write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_write_ops","title":"lun_write_ops","text":"

    Number of write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/lun write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_write_partial_blocks","title":"lun_write_partial_blocks","text":"

    Percentage of writes whose size is not a multiple of WAFL block size

    API Endpoint Metric Template REST api/cluster/counter/tables/lun write_partial_blocksUnit: percentType: percentBase: write_ops conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun write_partial_blocksUnit: percentType: percentBase: write_ops conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_writesame_reqs","title":"lun_writesame_reqs","text":"

    Number of write same command requests

    API Endpoint Metric Template REST api/cluster/counter/tables/lun writesame_requestsUnit: noneType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun writesame_reqsUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_writesame_unmap_reqs","title":"lun_writesame_unmap_reqs","text":"

    Number of write same command requests with the unmap bit set

    API Endpoint Metric Template REST api/cluster/counter/tables/lun writesame_unmap_requestsUnit: noneType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun writesame_unmap_reqsUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_xcopy_reqs","title":"lun_xcopy_reqs","text":"

    Total number of xcopy operations on the LUN

    API Endpoint Metric Template REST api/cluster/counter/tables/lun xcopy_requestsUnit: noneType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun xcopy_reqsUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#metadata_collector_api_time","title":"metadata_collector_api_time","text":"

    amount of time to collect data from monitored cluster object

    API Endpoint Metric Template REST NA Harvest generatedUnit: microseconds NA ZAPI NA Harvest generatedUnit: microseconds NA"},{"location":"ontap-metrics/#metadata_collector_calc_time","title":"metadata_collector_calc_time","text":"

    amount of time it took to compute metrics between two successive polls, specifically using properties like raw, delta, rate, average, and percent. This metric is available for ZapiPerf/RestPerf collectors.

    API Endpoint Metric Template REST NA Harvest generatedUnit: microseconds NA ZAPI NA Harvest generatedUnit: microseconds NA"},{"location":"ontap-metrics/#metadata_collector_instances","title":"metadata_collector_instances","text":"

    number of objects collected from monitored cluster

    API Endpoint Metric Template REST NA Harvest generatedUnit: scalar NA ZAPI NA Harvest generatedUnit: scalar NA"},{"location":"ontap-metrics/#metadata_collector_metrics","title":"metadata_collector_metrics","text":"

    number of counters collected from monitored cluster

    API Endpoint Metric Template REST NA Harvest generatedUnit: scalar NA ZAPI NA Harvest generatedUnit: scalar NA"},{"location":"ontap-metrics/#metadata_collector_parse_time","title":"metadata_collector_parse_time","text":"

    amount of time to parse XML, JSON, etc. for cluster object

    API Endpoint Metric Template REST NA Harvest generatedUnit: microseconds NA ZAPI NA Harvest generatedUnit: microseconds NA"},{"location":"ontap-metrics/#metadata_collector_plugin_time","title":"metadata_collector_plugin_time","text":"

    amount of time for all plugins to post-process metrics

    API Endpoint Metric Template REST NA Harvest generatedUnit: microseconds NA ZAPI NA Harvest generatedUnit: microseconds NA"},{"location":"ontap-metrics/#metadata_collector_poll_time","title":"metadata_collector_poll_time","text":"

    amount of time it took for the poll to finish

    API Endpoint Metric Template REST NA Harvest generatedUnit: microseconds NA ZAPI NA Harvest generatedUnit: microseconds NA"},{"location":"ontap-metrics/#metadata_collector_skips","title":"metadata_collector_skips","text":"

    number of metrics that were not calculated between two successive polls. This metric is available for ZapiPerf/RestPerf collectors.

    API Endpoint Metric Template REST NA Harvest generatedUnit: scalar NA ZAPI NA Harvest generatedUnit: scalar NA"},{"location":"ontap-metrics/#metadata_collector_task_time","title":"metadata_collector_task_time","text":"

    amount of time it took for each collector's subtasks to complete

    API Endpoint Metric Template REST NA Harvest generatedUnit: microseconds NA ZAPI NA Harvest generatedUnit: microseconds NA"},{"location":"ontap-metrics/#metadata_component_count","title":"metadata_component_count","text":"

    number of metrics collected for each object

    API Endpoint Metric Template REST NA Harvest generatedUnit: scalar NA ZAPI NA Harvest generatedUnit: scalar NA"},{"location":"ontap-metrics/#metadata_component_status","title":"metadata_component_status","text":"

    status of the collector - 0 means running, 1 means standby, 2 means failed

    API Endpoint Metric Template REST NA Harvest generatedUnit: enum NA ZAPI NA Harvest generatedUnit: enum NA"},{"location":"ontap-metrics/#metadata_exporter_count","title":"metadata_exporter_count","text":"

    number of metrics and labels exported

    API Endpoint Metric Template REST NA Harvest generatedUnit: scalar NA ZAPI NA Harvest generatedUnit: scalar NA"},{"location":"ontap-metrics/#metadata_exporter_time","title":"metadata_exporter_time","text":"

    amount of time it took to render, export, and serve exported data

    API Endpoint Metric Template REST NA Harvest generatedUnit: microseconds NA ZAPI NA Harvest generatedUnit: microseconds NA"},{"location":"ontap-metrics/#metadata_target_goroutines","title":"metadata_target_goroutines","text":"

    number of goroutines that exist within the poller

    API Endpoint Metric Template REST NA Harvest generatedUnit: scalar NA ZAPI NA Harvest generatedUnit: scalar NA"},{"location":"ontap-metrics/#metadata_target_status","title":"metadata_target_status","text":"

    status of the system being monitored. 0 means reachable, 1 means unreachable

    API Endpoint Metric Template REST NA Harvest generatedUnit: enum NA ZAPI NA Harvest generatedUnit: enum NA"},{"location":"ontap-metrics/#metrocluster_check_aggr_status","title":"metrocluster_check_aggr_status","text":"
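    A hedged example of interpreting the two enum-valued metadata metrics above, metadata_component_status and metadata_target_status; the value mappings follow the descriptions, while the helper itself is illustrative and not part of Harvest:

```python
# Mappings taken from the metric descriptions above; the describe() helper
# is hypothetical and exists only to show how the enum values read.
COMPONENT_STATUS = {0: "running", 1: "standby", 2: "failed"}
TARGET_STATUS = {0: "reachable", 1: "unreachable"}

def describe(component_status: int, target_status: int) -> str:
    return (f"collector={COMPONENT_STATUS.get(component_status, 'unknown')}, "
            f"target={TARGET_STATUS.get(target_status, 'unknown')}")

print(describe(0, 1))  # collector=running, target=unreachable
```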

    Detail of the type of diagnostic operation run for the Aggregate, along with the diagnostic operation result.

    API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/metrocluster_check.yaml"},{"location":"ontap-metrics/#metrocluster_check_cluster_status","title":"metrocluster_check_cluster_status","text":"

    Detail of the type of diagnostic operation run for the Cluster, along with the diagnostic operation result.

    API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/metrocluster_check.yaml"},{"location":"ontap-metrics/#metrocluster_check_node_status","title":"metrocluster_check_node_status","text":"

    Detail of the type of diagnostic operation run for the Node, along with the result of that diagnostic operation.

    API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/metrocluster_check.yaml"},{"location":"ontap-metrics/#metrocluster_check_volume_status","title":"metrocluster_check_volume_status","text":"

    Detail of the type of diagnostic operation run for the Volume, along with the result of that diagnostic operation.

    API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/metrocluster_check.yaml"},{"location":"ontap-metrics/#namespace_avg_other_latency","title":"namespace_avg_other_latency","text":"

    Average other ops latency in microseconds for all operations on the Namespace
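    The Metric entries throughout this page carry Unit, Type, and Base annotations. Type describes how the raw cumulative ONTAP counter is typically turned into the reported value between two successive polls: rate is the delta per elapsed second, average is the delta divided by the delta of the Base counter, percent is the same scaled to 0-100, and delta is a plain difference. A minimal sketch of that interpretation, with invented numbers:

        # Illustrative interpretation of the Type/Base annotations between
        # two successive polls of a cumulative performance counter.

        def as_rate(curr, prev, elapsed_seconds):
            # Type: rate -- counter delta divided by elapsed time (per_sec).
            return (curr - prev) / elapsed_seconds

        def as_average(curr, prev, base_curr, base_prev):
            # Type: average -- counter delta divided by the delta of its
            # Base counter, e.g. avg_other_latency over other_ops.
            base_delta = base_curr - base_prev
            return (curr - prev) / base_delta if base_delta else 0.0

        def as_percent(curr, prev, base_curr, base_prev):
            # Type: percent -- like average, scaled to 0-100,
            # e.g. cpu_busy over cpu_elapsed_time.
            return 100.0 * as_average(curr, prev, base_curr, base_prev)

        # Two polls taken 60 seconds apart (made-up values).
        lat1, lat2 = 1_000_000, 1_600_000   # cumulative microseconds
        ops1, ops2 = 5_000, 5_300           # cumulative other_ops
        print(as_average(lat2, lat1, ops2, ops1))  # 2000.0 microsec per op
        print(as_rate(ops2, ops1, 60))             # 5.0 ops per second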

    API Endpoint Metric Template REST api/cluster/counter/tables/namespace average_other_latencyUnit: microsecType: averageBase: other_ops conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace avg_other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_avg_read_latency","title":"namespace_avg_read_latency","text":"

    Average read latency in microseconds for all operations on the Namespace

    API Endpoint Metric Template REST api/cluster/counter/tables/namespace average_read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace avg_read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_avg_write_latency","title":"namespace_avg_write_latency","text":"

    Average write latency in microseconds for all operations on the Namespace

    API Endpoint Metric Template REST api/cluster/counter/tables/namespace average_write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace avg_write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_block_size","title":"namespace_block_size","text":"

    The size of blocks in the namespace, in bytes. Valid in POST when creating an NVMe namespace that is not a clone of another. Disallowed in POST when creating a namespace clone.

    API Endpoint Metric Template REST api/storage/namespaces space.block_size conf/rest/9.12.0/namespace.yaml ZAPI nvme-namespace-get-iter nvme-namespace-info.block-size conf/zapi/cdot/9.8.0/namespace.yaml"},{"location":"ontap-metrics/#namespace_other_ops","title":"namespace_other_ops","text":"

    Number of other operations

    API Endpoint Metric Template REST api/cluster/counter/tables/namespace other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_read_data","title":"namespace_read_data","text":"

    Read bytes

    API Endpoint Metric Template REST api/cluster/counter/tables/namespace read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_read_ops","title":"namespace_read_ops","text":"

    Number of read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/namespace read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_remote_bytes","title":"namespace_remote_bytes","text":"

    Remote read bytes

    API Endpoint Metric Template REST api/cluster/counter/tables/namespace remote.read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace remote_bytesUnit: Type: Base: conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_remote_ops","title":"namespace_remote_ops","text":"

    Number of remote read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/namespace remote.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace remote_opsUnit: Type: Base: conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_size","title":"namespace_size","text":"

    The total provisioned size of the NVMe namespace. Valid in POST and PATCH. The NVMe namespace size can be increased but not made smaller using the REST interface. The maximum and minimum sizes listed here are the absolute maximum and absolute minimum sizes in bytes. The maximum size is variable with respect to large NVMe namespace support in ONTAP. If large namespaces are supported, the maximum size is 128 TB (140737488355328 bytes); if not supported, the maximum size is just under 16 TB (17557557870592 bytes). The minimum size supported is always 4096 bytes. For more information, see Size properties in the docs section of the ONTAP REST API documentation.

    API Endpoint Metric Template REST api/storage/namespaces space.size conf/rest/9.12.0/namespace.yaml ZAPI nvme-namespace-get-iter nvme-namespace-info.size conf/zapi/cdot/9.8.0/namespace.yaml"},{"location":"ontap-metrics/#namespace_size_available","title":"namespace_size_available","text":"

    This metric represents the amount of available space in a namespace.

    API Endpoint Metric Template REST api/storage/namespaces size, size_used conf/rest/9.12.0/namespace.yaml ZAPI nvme-namespace-get-iter size, size_used conf/zapi/cdot/9.8.0/namespace.yaml"},{"location":"ontap-metrics/#namespace_size_available_percent","title":"namespace_size_available_percent","text":"

    This metric represents the percentage of available space in a namespace.
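    Both this metric and namespace_size_available above are derived by Harvest from the listed source fields rather than read directly from ONTAP. A minimal sketch of that derivation, assuming it is the obvious difference and ratio of those fields:

        def namespace_space(size, size_used):
            # size and size_used come from api/storage/namespaces (or the
            # equivalent ZAPI fields); both are in bytes.
            size_available = size - size_used
            size_available_percent = 100.0 * size_available / size if size else 0.0
            return size_available, size_available_percent

        avail, pct = namespace_space(size=10_737_418_240, size_used=2_147_483_648)
        print(avail)          # 8589934592 bytes available
        print(round(pct, 1))  # 80.0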

    API Endpoint Metric Template REST api/storage/namespaces size_available, size conf/rest/9.12.0/namespace.yaml ZAPI nvme-namespace-get-iter size_available, size conf/zapi/cdot/9.8.0/namespace.yaml"},{"location":"ontap-metrics/#namespace_size_used","title":"namespace_size_used","text":"

    The amount of space consumed by the main data stream of the NVMe namespace. This value is the total space consumed in the volume by the NVMe namespace, including filesystem overhead, but excluding prefix and suffix streams. Due to internal filesystem overhead and the many ways NVMe filesystems and applications utilize blocks within a namespace, this value does not necessarily reflect actual consumption/availability from the perspective of the filesystem or application. Without specific knowledge of how the namespace blocks are utilized outside of ONTAP, this property should not be used as an indicator of an out-of-space condition. For more information, see Size properties in the docs section of the ONTAP REST API documentation.

    API Endpoint Metric Template REST api/storage/namespaces space.used conf/rest/9.12.0/namespace.yaml ZAPI nvme-namespace-get-iter nvme-namespace-info.size-used conf/zapi/cdot/9.8.0/namespace.yaml"},{"location":"ontap-metrics/#namespace_write_data","title":"namespace_write_data","text":"

    Write bytes

    API Endpoint Metric Template REST api/cluster/counter/tables/namespace write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_write_ops","title":"namespace_write_ops","text":"

    Number of write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/namespace write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#ndmp_session_data_bytes_processed","title":"ndmp_session_data_bytes_processed","text":"

    Indicates the NDMP data bytes processed.

    API Endpoint Metric Template REST api/protocols/ndmp/sessions data.bytes_processed conf/rest/9.7.0/ndmp_session.yaml"},{"location":"ontap-metrics/#ndmp_session_mover_bytes_moved","title":"ndmp_session_mover_bytes_moved","text":"

    Indicates the NDMP mover bytes moved.

    API Endpoint Metric Template REST api/protocols/ndmp/sessions mover.bytes_moved conf/rest/9.7.0/ndmp_session.yaml"},{"location":"ontap-metrics/#net_port_mtu","title":"net_port_mtu","text":"

    Maximum transmission unit, largest packet size on this network

    API Endpoint Metric Template REST api/network/ethernet/ports mtu conf/rest/9.12.0/netport.yaml ZAPI net-port-get-iter net-port-info.mtu conf/zapi/cdot/9.8.0/netport.yaml"},{"location":"ontap-metrics/#netstat_bytes_recvd","title":"netstat_bytes_recvd","text":"

    Number of bytes received by a TCP connection

    API Endpoint Metric Template ZAPI perf-object-get-instances netstat bytes_recvdUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#netstat_bytes_sent","title":"netstat_bytes_sent","text":"

    Number of bytes sent by a TCP connection

    API Endpoint Metric Template ZAPI perf-object-get-instances netstat bytes_sentUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#netstat_cong_win","title":"netstat_cong_win","text":"

    Congestion window of a TCP connection

    API Endpoint Metric Template ZAPI perf-object-get-instances netstat cong_winUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#netstat_cong_win_th","title":"netstat_cong_win_th","text":"

    Congestion window threshold of a TCP connection

    API Endpoint Metric Template ZAPI perf-object-get-instances netstat cong_win_thUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#netstat_ooorcv_pkts","title":"netstat_ooorcv_pkts","text":"

    Number of out-of-order packets received by this TCP connection

    API Endpoint Metric Template ZAPI perf-object-get-instances netstat ooorcv_pktsUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#netstat_recv_window","title":"netstat_recv_window","text":"

    Receive window size of a TCP connection

    API Endpoint Metric Template ZAPI perf-object-get-instances netstat recv_windowUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#netstat_rexmit_pkts","title":"netstat_rexmit_pkts","text":"

    Number of packets retransmitted by this TCP connection

    API Endpoint Metric Template ZAPI perf-object-get-instances netstat rexmit_pktsUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#netstat_send_window","title":"netstat_send_window","text":"

    Send window size of a TCP connection

    API Endpoint Metric Template ZAPI perf-object-get-instances netstat send_windowUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#nfs_clients_idle_duration","title":"nfs_clients_idle_duration","text":"

    Specifies the idle time duration of the connected client, returned in ISO-8601 duration format (hours, minutes, and seconds).
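    A small helper, assuming values shaped like "PT2H15M30S", that converts such an ISO-8601 duration string into seconds (the exact set of fields present in a given value may vary):

        import re

        # Hypothetical converter for ISO-8601 durations such as "PT2H15M30S".
        _DURATION = re.compile(
            r"^P(?:(?P<d>\d+)D)?"
            r"(?:T(?:(?P<h>\d+)H)?(?:(?P<m>\d+)M)?(?:(?P<s>\d+(?:\.\d+)?)S)?)?$"
        )

        def iso_duration_seconds(value):
            m = _DURATION.match(value)
            if not m:
                raise ValueError(f"not an ISO-8601 duration: {value!r}")
            d, h, mi, s = (m.group(k) for k in ("d", "h", "m", "s"))
            return (int(d or 0) * 86400 + int(h or 0) * 3600
                    + int(mi or 0) * 60 + float(s or 0))

        print(iso_duration_seconds("PT2H15M30S"))  # 8130.0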

    API Endpoint Metric Template REST api/protocols/nfs/connected-clients idle_duration conf/rest/9.7.0/nfs_clients.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_bytelockalloc","title":"nfs_diag_storePool_ByteLockAlloc","text":"

    Current number of byte range lock objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.byte_lock_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_ByteLockAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_bytelockmax","title":"nfs_diag_storePool_ByteLockMax","text":"

    Maximum number of byte range lock objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.byte_lock_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_ByteLockMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_clientalloc","title":"nfs_diag_storePool_ClientAlloc","text":"

    Current number of client objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.client_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_ClientAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_clientmax","title":"nfs_diag_storePool_ClientMax","text":"

    Maximum number of client objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.client_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_ClientMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_connectionparentsessionreferencealloc","title":"nfs_diag_storePool_ConnectionParentSessionReferenceAlloc","text":"

    Current number of connection parent session reference objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.connection_parent_session_reference_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_ConnectionParentSessionReferenceAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_connectionparentsessionreferencemax","title":"nfs_diag_storePool_ConnectionParentSessionReferenceMax","text":"

    Maximum number of connection parent session reference objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.connection_parent_session_reference_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_ConnectionParentSessionReferenceMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_copystatealloc","title":"nfs_diag_storePool_CopyStateAlloc","text":"

    Current number of copy state objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.copy_state_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_CopyStateAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_copystatemax","title":"nfs_diag_storePool_CopyStateMax","text":"

    Maximum number of copy state objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.copy_state_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_CopyStateMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_delegalloc","title":"nfs_diag_storePool_DelegAlloc","text":"

    Current number of delegation lock objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.delegation_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_DelegAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_delegmax","title":"nfs_diag_storePool_DelegMax","text":"

    Maximum number of delegation lock objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.delegation_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_DelegMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_delegstatealloc","title":"nfs_diag_storePool_DelegStateAlloc","text":"

    Current number of delegation state objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.delegation_state_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_DelegStateAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_delegstatemax","title":"nfs_diag_storePool_DelegStateMax","text":"

    Maximum number of delegation state objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.delegation_state_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_DelegStateMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_layoutalloc","title":"nfs_diag_storePool_LayoutAlloc","text":"

    Current number of layout objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.layout_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_LayoutAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_layoutmax","title":"nfs_diag_storePool_LayoutMax","text":"

    Maximum number of layout objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.layout_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_LayoutMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_layoutstatealloc","title":"nfs_diag_storePool_LayoutStateAlloc","text":"

    Current number of layout state objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.layout_state_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_LayoutStateAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_layoutstatemax","title":"nfs_diag_storePool_LayoutStateMax","text":"

    Maximum number of layout state objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.layout_state_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_LayoutStateMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_lockstatealloc","title":"nfs_diag_storePool_LockStateAlloc","text":"

    Current number of lock state objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.lock_state_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_LockStateAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_lockstatemax","title":"nfs_diag_storePool_LockStateMax","text":"

    Maximum number of lock state objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.lock_state_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_LockStateMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_openalloc","title":"nfs_diag_storePool_OpenAlloc","text":"

    Current number of share objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.open_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_OpenAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_openmax","title":"nfs_diag_storePool_OpenMax","text":"

    Maximum number of share lock objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.open_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_OpenMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_openstatealloc","title":"nfs_diag_storePool_OpenStateAlloc","text":"

    Current number of open state objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.openstate_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_OpenStateAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_openstatemax","title":"nfs_diag_storePool_OpenStateMax","text":"

    Maximum number of open state objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.openstate_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_OpenStateMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_owneralloc","title":"nfs_diag_storePool_OwnerAlloc","text":"

    Current number of owner objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.owner_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_OwnerAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_ownermax","title":"nfs_diag_storePool_OwnerMax","text":"

    Maximum number of owner objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.owner_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_OwnerMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_sessionalloc","title":"nfs_diag_storePool_SessionAlloc","text":"

    Current number of session objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.session_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_SessionAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_sessionconnectionholderalloc","title":"nfs_diag_storePool_SessionConnectionHolderAlloc","text":"

    Current number of session connection holder objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.session_connection_holder_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_SessionConnectionHolderAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_sessionconnectionholdermax","title":"nfs_diag_storePool_SessionConnectionHolderMax","text":"

    Maximum number of session connection holder objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.session_connection_holder_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_SessionConnectionHolderMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_sessionholderalloc","title":"nfs_diag_storePool_SessionHolderAlloc","text":"

    Current number of session holder objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.session_holder_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_SessionHolderAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_sessionholdermax","title":"nfs_diag_storePool_SessionHolderMax","text":"

    Maximum number of session holder objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.session_holder_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_SessionHolderMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_sessionmax","title":"nfs_diag_storePool_SessionMax","text":"

    Maximum number of session objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.session_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_SessionMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_staterefhistoryalloc","title":"nfs_diag_storePool_StateRefHistoryAlloc","text":"

    Current number of state reference callstack history objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.state_reference_history_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_StateRefHistoryAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_staterefhistorymax","title":"nfs_diag_storePool_StateRefHistoryMax","text":"

    Maximum number of state reference callstack history objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.state_reference_history_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_StateRefHistoryMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_stringalloc","title":"nfs_diag_storePool_StringAlloc","text":"

    Current number of string objects allocated.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.string_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_StringAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_stringmax","title":"nfs_diag_storePool_StringMax","text":"

    Maximum number of string objects.

    API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.string_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_StringMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nic_link_up_to_downs","title":"nic_link_up_to_downs","text":"

    Number of link state changes from UP to DOWN.

    API Endpoint Metric Template REST api/cluster/counter/tables/nic_common link_up_to_downUnit: noneType: deltaBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common link_up_to_downsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_rx_alignment_errors","title":"nic_rx_alignment_errors","text":"

    Alignment errors detected on received packets

    API Endpoint Metric Template REST api/cluster/counter/tables/nic_common receive_alignment_errorsUnit: noneType: deltaBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common rx_alignment_errorsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_rx_bytes","title":"nic_rx_bytes","text":"

    Bytes received

    API Endpoint Metric Template REST api/cluster/counter/tables/nic_common receive_bytesUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common rx_bytesUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_rx_crc_errors","title":"nic_rx_crc_errors","text":"

    CRC errors detected on received packets

    API Endpoint Metric Template REST api/cluster/counter/tables/nic_common receive_crc_errorsUnit: noneType: deltaBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common rx_crc_errorsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_rx_errors","title":"nic_rx_errors","text":"

    Errors received

    API Endpoint Metric Template REST api/cluster/counter/tables/nic_common receive_errorsUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common rx_errorsUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_rx_length_errors","title":"nic_rx_length_errors","text":"

    Length errors detected on received packets

    API Endpoint Metric Template REST api/cluster/counter/tables/nic_common receive_length_errorsUnit: noneType: deltaBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common rx_length_errorsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_rx_total_errors","title":"nic_rx_total_errors","text":"

    Total errors received

    API Endpoint Metric Template REST api/cluster/counter/tables/nic_common receive_total_errorsUnit: noneType: deltaBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common rx_total_errorsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_tx_bytes","title":"nic_tx_bytes","text":"

    Bytes sent

    API Endpoint Metric Template REST api/cluster/counter/tables/nic_common transmit_bytesUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common tx_bytesUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_tx_errors","title":"nic_tx_errors","text":"

    Errors sent

    API Endpoint Metric Template REST api/cluster/counter/tables/nic_common transmit_errorsUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common tx_errorsUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_tx_hw_errors","title":"nic_tx_hw_errors","text":"

    Transmit errors reported by hardware

    API Endpoint Metric Template REST api/cluster/counter/tables/nic_common transmit_hw_errorsUnit: noneType: deltaBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common tx_hw_errorsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_tx_total_errors","title":"nic_tx_total_errors","text":"

    Total errors sent

    API Endpoint Metric Template REST api/cluster/counter/tables/nic_common transmit_total_errorsUnit: noneType: deltaBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common tx_total_errorsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#node_avg_processor_busy","title":"node_avg_processor_busy","text":"

    Average processor utilization across active processors in the system

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node average_processor_busy_percentUnit: percentType: percentBase: cpu_elapsed_time conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node avg_processor_busyUnit: percentType: percentBase: cpu_elapsed_time conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_cifs_connections","title":"node_cifs_connections","text":"

    Number of connections

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node connectionsUnit: noneType: rawBase: conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node connectionsUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_established_sessions","title":"node_cifs_established_sessions","text":"

    Number of established SMB and SMB2 sessions

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node established_sessionsUnit: noneType: rawBase: conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node established_sessionsUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_latency","title":"node_cifs_latency","text":"

    Average latency for CIFS operations

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node latencyUnit: microsecType: averageBase: latency_base conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node cifs_latencyUnit: microsecType: averageBase: cifs_latency_base conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_op_count","title":"node_cifs_op_count","text":"

    Array of select CIFS operation counts

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node op_countUnit: noneType: rateBase: conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node cifs_op_countUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_open_files","title":"node_cifs_open_files","text":"

    Number of open files over SMB and SMB2

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node open_filesUnit: noneType: rawBase: conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node open_filesUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_ops","title":"node_cifs_ops","text":"

    Number of CIFS operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node cifs_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node cifs_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_cifs_read_latency","title":"node_cifs_read_latency","text":"

    Average latency for CIFS read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node average_read_latencyUnit: microsecType: averageBase: total_read_ops conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node cifs_read_latencyUnit: microsecType: averageBase: cifs_read_ops conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_read_ops","title":"node_cifs_read_ops","text":"

    Total number of CIFS read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node total_read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node cifs_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_total_ops","title":"node_cifs_total_ops","text":"

    Total number of CIFS operations

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node cifs_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_write_latency","title":"node_cifs_write_latency","text":"

    Average latency for CIFS write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node average_write_latencyUnit: microsecType: averageBase: total_write_ops conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node cifs_write_latencyUnit: microsecType: averageBase: cifs_write_ops conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_write_ops","title":"node_cifs_write_ops","text":"

    Total number of CIFS write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node total_write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node cifs_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cpu_busy","title":"node_cpu_busy","text":"

    System CPU resource utilization. Returns a computed percentage for the default CPU field. It computes a 'CPU usage summary' value that indicates how busy the system is, based on the most heavily utilized domain. The idea is to estimate how much CPU remains available before either a single domain maxes out or all available idle CPU cycles are exhausted, whichever occurs first.

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node cpu_busyUnit: percentType: percentBase: cpu_elapsed_time conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node cpu_busyUnit: percentType: percentBase: cpu_elapsed_time conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_cpu_busytime","title":"node_cpu_busytime","text":"

    The time (in hundredths of a second) that the CPU has been doing useful work since the last boot

    API Endpoint Metric Template REST api/private/cli/node cpu_busy_time conf/rest/9.12.0/node.yaml ZAPI system-node-get-iter node-details-info.cpu-busytime conf/zapi/cdot/9.8.0/node.yaml"},{"location":"ontap-metrics/#node_cpu_domain_busy","title":"node_cpu_domain_busy","text":"

    Array of processor time, as a percentage, spent in the various domains

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node domain_busyUnit: percentType: percentBase: cpu_elapsed_time conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node domain_busyUnit: percentType: percentBase: cpu_elapsed_time conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_cpu_elapsed_time","title":"node_cpu_elapsed_time","text":"

    Elapsed time since boot

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node cpu_elapsed_timeUnit: microsecType: deltaBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node cpu_elapsed_timeUnit: noneType: delta,no-displayBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_disk_busy","title":"node_disk_busy","text":"

    The utilization percent of the disk. node_disk_busy is disk_busy aggregated by node.
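    The node_disk_* series in this family are per-disk counters rolled up to the node label by Harvest's aggregation plugins, and the node_disk_max_* variants further below keep the per-node maximum instead. A rough sketch of that roll-up follows; whether a given metric is summed, averaged, or maxed depends on the metric, and the sample values are invented.

        from collections import defaultdict

        # Invented per-disk samples; the real roll-up happens inside Harvest.
        samples = [
            {"node": "node1", "disk": "1.0.0", "disk_busy": 35.0, "user_reads": 120.0},
            {"node": "node1", "disk": "1.0.1", "disk_busy": 60.0, "user_reads": 80.0},
            {"node": "node2", "disk": "1.0.2", "disk_busy": 10.0, "user_reads": 40.0},
        ]

        def rollup(rows, metric, func):
            grouped = defaultdict(list)
            for row in rows:
                grouped[row["node"]].append(row[metric])
            return {node: func(values) for node, values in grouped.items()}

        print(rollup(samples, "user_reads", sum))  # e.g. node_disk_user_reads
        print(rollup(samples, "disk_busy", max))   # e.g. node_disk_max_busy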

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent disk_busy_percentUnit: percentType: percentBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_busyUnit: percentType: percentBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_capacity","title":"node_disk_capacity","text":"

    Disk capacity in MB. node_disk_capacity is disk_capacity aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent capacityUnit: mbType: rawBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_capacityUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_cp_read_chain","title":"node_disk_cp_read_chain","text":"

    Average number of blocks transferred in each consistency point read operation during a CP. node_disk_cp_read_chain is disk_cp_read_chain aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_chainUnit: noneType: averageBase: cp_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_chainUnit: noneType: averageBase: cp_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_cp_read_latency","title":"node_disk_cp_read_latency","text":"

    Average latency per block in microseconds for consistency point read operations. node_disk_cp_read_latency is disk_cp_read_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_cp_reads","title":"node_disk_cp_reads","text":"

    Number of disk read operations initiated each second for consistency point processing. node_disk_cp_reads is disk_cp_reads aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_data_read","title":"node_disk_data_read","text":"

    Number of disk kilobytes (KB) read per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node disk_data_readUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node disk_data_readUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_disk_data_written","title":"node_disk_data_written","text":"

    Number of disk kilobytes (KB) written per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node disk_data_writtenUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node disk_data_writtenUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_disk_io_pending","title":"node_disk_io_pending","text":"

    Average number of I/Os issued to the disk for which we have not yet received the response. node_disk_io_pending is disk_io_pending aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_io_queued","title":"node_disk_io_queued","text":"

    Number of I/Os queued to the disk but not yet issued. node_disk_io_queued is disk_io_queued aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_busy","title":"node_disk_max_busy","text":"

    The utilization percent of the disk. node_disk_max_busy is the maximum of disk_busy for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent disk_busy_percentUnit: percentType: percentBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_busyUnit: percentType: percentBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_capacity","title":"node_disk_max_capacity","text":"

    Disk capacity in MB. node_disk_max_capacity is the maximum of disk_capacity for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent capacityUnit: mbType: rawBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_capacityUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_cp_read_chain","title":"node_disk_max_cp_read_chain","text":"

    Average number of blocks transferred in each consistency point read operation during a CP. node_disk_max_cp_read_chain is the maximum of disk_cp_read_chain for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_chainUnit: noneType: averageBase: cp_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_chainUnit: noneType: averageBase: cp_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_cp_read_latency","title":"node_disk_max_cp_read_latency","text":"

    Average latency per block in microseconds for consistency point read operations. node_disk_max_cp_read_latency is the maximum of disk_cp_read_latency for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_cp_reads","title":"node_disk_max_cp_reads","text":"

    Number of disk read operations initiated each second for consistency point processing. node_disk_max_cp_reads is the maximum of disk_cp_reads for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_io_pending","title":"node_disk_max_io_pending","text":"

    Average number of I/Os issued to the disk for which we have not yet received the response. node_disk_max_io_pending is the maximum of disk_io_pending for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_io_queued","title":"node_disk_max_io_queued","text":"

    Number of I/Os queued to the disk but not yet issued. node_disk_max_io_queued is the maximum of disk_io_queued for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_total_data","title":"node_disk_max_total_data","text":"

    Total throughput for user operations per second. node_disk_max_total_data is the maximum of disk_total_data for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_total_transfers","title":"node_disk_max_total_transfers","text":"

    Total number of disk operations involving data transfer initiated per second. node_disk_max_total_transfers is the maximum of disk_total_transfers for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_transfer_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_transfersUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_read_blocks","title":"node_disk_max_user_read_blocks","text":"

    Number of blocks transferred for user read operations per second. node_disk_max_user_read_blocks is the maximum of disk_user_read_blocks for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_read_chain","title":"node_disk_max_user_read_chain","text":"

    Average number of blocks transferred in each user read operation. node_disk_max_user_read_chain is the maximum of disk_user_read_chain for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_chainUnit: noneType: averageBase: user_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_chainUnit: noneType: averageBase: user_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_read_latency","title":"node_disk_max_user_read_latency","text":"

    Average latency per block in microseconds for user read operations. node_disk_max_user_read_latency is the maximum of disk_user_read_latency for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_reads","title":"node_disk_max_user_reads","text":"

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. node_disk_max_user_reads is the maximum of disk_user_reads for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_write_blocks","title":"node_disk_max_user_write_blocks","text":"

    Number of blocks transferred for user write operations per second. node_disk_max_user_write_blocks is the maximum of disk_user_write_blocks for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_write_chain","title":"node_disk_max_user_write_chain","text":"

    Average number of blocks transferred in each user write operation. node_disk_max_user_write_chain is the maximum of disk_user_write_chain for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_chainUnit: noneType: averageBase: user_write_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_chainUnit: noneType: averageBase: user_writes conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_write_latency","title":"node_disk_max_user_write_latency","text":"

    Average latency per block in microseconds for user write operations. node_disk_max_user_write_latency is the maximum of disk_user_write_latency for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_writes","title":"node_disk_max_user_writes","text":"

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. node_disk_max_user_writes is the maximum of disk_user_writes for label node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_writesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_total_data","title":"node_disk_total_data","text":"

    Total throughput for user operations per second. node_disk_total_data is disk_total_data aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_total_transfers","title":"node_disk_total_transfers","text":"

    Total number of disk operations involving data transfer initiated per second. node_disk_total_transfers is disk_total_transfers aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_transfer_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_transfersUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_read_blocks","title":"node_disk_user_read_blocks","text":"

    Number of blocks transferred for user read operations per second. node_disk_user_read_blocks is disk_user_read_blocks aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_read_chain","title":"node_disk_user_read_chain","text":"

    Average number of blocks transferred in each user read operation. node_disk_user_read_chain is disk_user_read_chain aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_chainUnit: noneType: averageBase: user_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_chainUnit: noneType: averageBase: user_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_read_latency","title":"node_disk_user_read_latency","text":"

    Average latency per block in microseconds for user read operations. node_disk_user_read_latency is disk_user_read_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_reads","title":"node_disk_user_reads","text":"

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. node_disk_user_reads is disk_user_reads aggregated by node.

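    Counters listed with Type: rate, such as this one, are exposed as per-second values: the change in the monotonically increasing raw counter divided by the seconds elapsed between two polls. A small sketch of that arithmetic, with made-up numbers:

```python
# Minimal sketch of a "rate" counter: delta of a monotonically increasing
# counter divided by the elapsed poll interval in seconds. Values are made up.

def rate_counter(prev_value: float, curr_value: float, interval_seconds: float) -> float:
    """Per-second rate over one poll interval."""
    return (curr_value - prev_value) / interval_seconds

# e.g. user_read_count advanced by 3,600 operations over a 60-second poll.
print(rate_counter(prev_value=1_250_000, curr_value=1_253_600,
                   interval_seconds=60.0))  # 60.0 ops per second
```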
    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_write_blocks","title":"node_disk_user_write_blocks","text":"

    Number of blocks transferred for user write operations per second. node_disk_user_write_blocks is disk_user_write_blocks aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_write_chain","title":"node_disk_user_write_chain","text":"

    Average number of blocks transferred in each user write operation. node_disk_user_write_chain is disk_user_write_chain aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_chainUnit: noneType: averageBase: user_write_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_chainUnit: noneType: averageBase: user_writes conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_write_latency","title":"node_disk_user_write_latency","text":"

    Average latency per block in microseconds for user write operations. node_disk_user_write_latency is disk_user_write_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_writes","title":"node_disk_user_writes","text":"

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. node_disk_user_writes is disk_user_writes aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_writesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_failed_fan","title":"node_failed_fan","text":"

    Number of chassis fans that are not operating within the recommended RPM range.

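    As the table below shows, this metric comes from the api/cluster/nodes REST endpoint, field controller.failed_fan.count. The sketch below is not how Harvest itself collects it; it only illustrates fetching that field directly with the ONTAP REST API. The cluster address and credentials are placeholders, and TLS verification is disabled purely for the example.

```python
# Minimal sketch (not Harvest itself): read the counter behind node_failed_fan
# straight from the REST endpoint listed below. CLUSTER and AUTH are
# placeholders; verify=False is only acceptable for an illustration.
import requests

CLUSTER = "https://cluster.example.com"   # hypothetical management LIF
AUTH = ("admin", "password")              # placeholder credentials

resp = requests.get(
    f"{CLUSTER}/api/cluster/nodes",
    params={"fields": "name,controller.failed_fan.count"},
    auth=AUTH,
    verify=False,
)
resp.raise_for_status()

for node in resp.json().get("records", []):
    failed = node.get("controller", {}).get("failed_fan", {}).get("count", 0)
    print(f'node_failed_fan{{node="{node["name"]}"}} {failed}')
```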
    API Endpoint Metric Template REST api/cluster/nodes controller.failed_fan.count conf/rest/9.12.0/node.yaml ZAPI system-node-get-iter node-details-info.env-failed-fan-count conf/zapi/cdot/9.8.0/node.yaml"},{"location":"ontap-metrics/#node_failed_power","title":"node_failed_power","text":"

    Number of failed power supply units.

    API Endpoint Metric Template REST api/cluster/nodes controller.failed_power_supply.count conf/rest/9.12.0/node.yaml ZAPI system-node-get-iter node-details-info.env-failed-power-supply-count conf/zapi/cdot/9.8.0/node.yaml"},{"location":"ontap-metrics/#node_fcp_data_recv","title":"node_fcp_data_recv","text":"

    Number of FCP kilobytes (KB) received per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node fcp_data_receivedUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node fcp_data_recvUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_fcp_data_sent","title":"node_fcp_data_sent","text":"

    Number of FCP kilobytes (KB) sent per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node fcp_data_sentUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node fcp_data_sentUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_fcp_ops","title":"node_fcp_ops","text":"

    Number of FCP operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node fcp_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node fcp_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_hdd_data_read","title":"node_hdd_data_read","text":"

    Number of HDD kilobytes (KB) read per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node hdd_data_readUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node hdd_data_readUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_hdd_data_written","title":"node_hdd_data_written","text":"

    Number of HDD kilobytes (KB) written per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node hdd_data_writtenUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node hdd_data_writtenUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_iscsi_ops","title":"node_iscsi_ops","text":"

    Number of iSCSI operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node iscsi_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node iscsi_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_memory","title":"node_memory","text":"

    Total memory in megabytes (MB)

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node memoryUnit: noneType: rawBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node memoryUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_net_data_recv","title":"node_net_data_recv","text":"

    Number of network kilobytes (KB) received per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node network_data_receivedUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node net_data_recvUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_net_data_sent","title":"node_net_data_sent","text":"

    Number of network kilobytes (KB) sent per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node network_data_sentUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node net_data_sentUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_nfs_access_avg_latency","title":"node_nfs_access_avg_latency","text":"

    Average latency of Access procedure requests. The counter keeps track of the average response time of Access requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_access_total","title":"node_nfs_access_total","text":"

    Total number of Access procedure requests. It is the total number of access success and access error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_backchannel_ctl_avg_latency","title":"node_nfs_backchannel_ctl_avg_latency","text":"

    Average latency of BACKCHANNEL_CTL operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node backchannel_ctl.average_latencyUnit: microsecType: averageBase: backchannel_ctl.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node backchannel_ctl.average_latencyUnit: microsecType: averageBase: backchannel_ctl.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node backchannel_ctl_avg_latencyUnit: microsecType: average,no-zero-valuesBase: backchannel_ctl_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node backchannel_ctl_avg_latencyUnit: microsecType: average,no-zero-valuesBase: backchannel_ctl_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_backchannel_ctl_total","title":"node_nfs_backchannel_ctl_total","text":"

    Total number of BACKCHANNEL_CTL operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node backchannel_ctl.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node backchannel_ctl.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node backchannel_ctl_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node backchannel_ctl_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_bind_conn_to_session_avg_latency","title":"node_nfs_bind_conn_to_session_avg_latency","text":"

    Average latency of BIND_CONN_TO_SESSION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node bind_connections_to_session.average_latencyUnit: microsecType: averageBase: bind_connections_to_session.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node bind_conn_to_session.average_latencyUnit: microsecType: averageBase: bind_conn_to_session.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node bind_conn_to_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: bind_conn_to_session_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node bind_conn_to_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: bind_conn_to_session_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_bind_conn_to_session_total","title":"node_nfs_bind_conn_to_session_total","text":"

    Total number of BIND_CONN_TO_SESSION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node bind_connections_to_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node bind_conn_to_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node bind_conn_to_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node bind_conn_to_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_close_avg_latency","title":"node_nfs_close_avg_latency","text":"

    Average latency of CLOSE operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node close.average_latencyUnit: microsecType: averageBase: close.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node close.average_latencyUnit: microsecType: averageBase: close.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node close.average_latencyUnit: microsecType: averageBase: close.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node close_avg_latencyUnit: microsecType: average,no-zero-valuesBase: close_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node close_avg_latencyUnit: microsecType: average,no-zero-valuesBase: close_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node close_avg_latencyUnit: microsecType: average,no-zero-valuesBase: close_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_close_total","title":"node_nfs_close_total","text":"

    Total number of CLOSE operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node close.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node close.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node close.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node close_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node close_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node close_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_commit_avg_latency","title":"node_nfs_commit_avg_latency","text":"

    Average latency of Commit procedure requests. The counter keeps track of the average response time of Commit requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_commit_total","title":"node_nfs_commit_total","text":"

    Total number of Commit procedure requests. It is the total number of Commit success and Commit error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_create_avg_latency","title":"node_nfs_create_avg_latency","text":"

    Average latency of Create procedure requests. The counter keeps track of the average response time of Create requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_create_session_avg_latency","title":"node_nfs_create_session_avg_latency","text":"

    Average latency of CREATE_SESSION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node create_session.average_latencyUnit: microsecType: averageBase: create_session.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node create_session.average_latencyUnit: microsecType: averageBase: create_session.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node create_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_session_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node create_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_session_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_create_session_total","title":"node_nfs_create_session_total","text":"

    Total number of CREATE_SESSION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node create_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node create_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node create_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node create_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_create_total","title":"node_nfs_create_total","text":"

    Total number of Create procedure requests. It is the total number of create success and create error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_delegpurge_avg_latency","title":"node_nfs_delegpurge_avg_latency","text":"

    Average latency of DELEGPURGE operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node delegpurge.average_latencyUnit: microsecType: averageBase: delegpurge.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node delegpurge.average_latencyUnit: microsecType: averageBase: delegpurge.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node delegpurge.average_latencyUnit: microsecType: averageBase: delegpurge.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node delegpurge_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegpurge_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node delegpurge_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegpurge_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node delegpurge_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegpurge_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_delegpurge_total","title":"node_nfs_delegpurge_total","text":"

    Total number of DELEGPURGE operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node delegpurge.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node delegpurge.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node delegpurge.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node delegpurge_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node delegpurge_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node delegpurge_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_delegreturn_avg_latency","title":"node_nfs_delegreturn_avg_latency","text":"

    Average latency of DELEGRETURN operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node delegreturn.average_latencyUnit: microsecType: averageBase: delegreturn.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node delegreturn.average_latencyUnit: microsecType: averageBase: delegreturn.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node delegreturn.average_latencyUnit: microsecType: averageBase: delegreturn.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node delegreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegreturn_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node delegreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegreturn_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node delegreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegreturn_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_delegreturn_total","title":"node_nfs_delegreturn_total","text":"

    Total number of DELEGRETURN operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node delegreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node delegreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node delegreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node delegreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node delegreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node delegreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_destroy_clientid_avg_latency","title":"node_nfs_destroy_clientid_avg_latency","text":"

    Average latency of DESTROY_CLIENTID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node destroy_clientid.average_latencyUnit: microsecType: averageBase: destroy_clientid.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node destroy_clientid.average_latencyUnit: microsecType: averageBase: destroy_clientid.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node destroy_clientid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_clientid_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node destroy_clientid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_clientid_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_destroy_clientid_total","title":"node_nfs_destroy_clientid_total","text":"

    Total number of DESTROY_CLIENTID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node destroy_clientid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node destroy_clientid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node destroy_clientid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node destroy_clientid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_destroy_session_avg_latency","title":"node_nfs_destroy_session_avg_latency","text":"

    Average latency of DESTROY_SESSION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node destroy_session.average_latencyUnit: microsecType: averageBase: destroy_session.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node destroy_session.average_latencyUnit: microsecType: averageBase: destroy_session.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node destroy_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_session_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node destroy_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_session_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_destroy_session_total","title":"node_nfs_destroy_session_total","text":"

    Total number of DESTROY_SESSION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node destroy_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node destroy_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node destroy_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node destroy_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_exchange_id_avg_latency","title":"node_nfs_exchange_id_avg_latency","text":"

    Average latency of EXCHANGE_ID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node exchange_id.average_latencyUnit: microsecType: averageBase: exchange_id.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node exchange_id.average_latencyUnit: microsecType: averageBase: exchange_id.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node exchange_id_avg_latencyUnit: microsecType: average,no-zero-valuesBase: exchange_id_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node exchange_id_avg_latencyUnit: microsecType: average,no-zero-valuesBase: exchange_id_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_exchange_id_total","title":"node_nfs_exchange_id_total","text":"

    Total number of EXCHANGE_ID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node exchange_id.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node exchange_id.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node exchange_id_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node exchange_id_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_free_stateid_avg_latency","title":"node_nfs_free_stateid_avg_latency","text":"

    Average latency of FREE_STATEID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node free_stateid.average_latencyUnit: microsecType: averageBase: free_stateid.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node free_stateid.average_latencyUnit: microsecType: averageBase: free_stateid.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node free_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: free_stateid_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node free_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: free_stateid_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_free_stateid_total","title":"node_nfs_free_stateid_total","text":"

    Total number of FREE_STATEID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node free_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node free_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node free_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node free_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_fsinfo_avg_latency","title":"node_nfs_fsinfo_avg_latency","text":"

    Average latency of FSInfo procedure requests. The counter keeps track of the average response time of FSInfo requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node fsinfo.average_latencyUnit: microsecType: averageBase: fsinfo.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node fsinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: fsinfo_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_fsinfo_total","title":"node_nfs_fsinfo_total","text":"

    Total number of FSInfo procedure requests. It is the total number of FSInfo success and FSInfo error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node fsinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node fsinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_fsstat_avg_latency","title":"node_nfs_fsstat_avg_latency","text":"

    Average latency of FSStat procedure requests. The counter keeps track of the average response time of FSStat requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node fsstat.average_latencyUnit: microsecType: averageBase: fsstat.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node fsstat_avg_latencyUnit: microsecType: average,no-zero-valuesBase: fsstat_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_fsstat_total","title":"node_nfs_fsstat_total","text":"

    Total number of FSStat procedure requests. It is the total number of FSStat success and FSStat error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node fsstat.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node fsstat_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_get_dir_delegation_avg_latency","title":"node_nfs_get_dir_delegation_avg_latency","text":"

    Average latency of GET_DIR_DELEGATION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node get_dir_delegation.average_latencyUnit: microsecType: averageBase: get_dir_delegation.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node get_dir_delegation.average_latencyUnit: microsecType: averageBase: get_dir_delegation.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node get_dir_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: get_dir_delegation_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node get_dir_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: get_dir_delegation_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_get_dir_delegation_total","title":"node_nfs_get_dir_delegation_total","text":"

    Total number of GET_DIR_DELEGATION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node get_dir_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node get_dir_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node get_dir_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node get_dir_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_getattr_avg_latency","title":"node_nfs_getattr_avg_latency","text":"

    Average latency of GetAttr procedure requests. This counter keeps track of the average response time of GetAttr requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_getattr_total","title":"node_nfs_getattr_total","text":"

    Total number of Getattr procedure requests. It is the total number of getattr success and getattr error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_getdeviceinfo_avg_latency","title":"node_nfs_getdeviceinfo_avg_latency","text":"

    Average latency of GETDEVICEINFO operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node getdeviceinfo.average_latencyUnit: microsecType: averageBase: getdeviceinfo.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getdeviceinfo.average_latencyUnit: microsecType: averageBase: getdeviceinfo.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getdeviceinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdeviceinfo_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getdeviceinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdeviceinfo_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_getdeviceinfo_total","title":"node_nfs_getdeviceinfo_total","text":"

    Total number of GETDEVICEINFO operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node getdeviceinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getdeviceinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getdeviceinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getdeviceinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_getdevicelist_avg_latency","title":"node_nfs_getdevicelist_avg_latency","text":"

    Average latency of GETDEVICELIST operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node getdevicelist.average_latencyUnit: microsecType: averageBase: getdevicelist.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getdevicelist.average_latencyUnit: microsecType: averageBase: getdevicelist.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getdevicelist_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdevicelist_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getdevicelist_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdevicelist_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_getdevicelist_total","title":"node_nfs_getdevicelist_total","text":"

    Total number of GETDEVICELIST operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node getdevicelist.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getdevicelist.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getdevicelist_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getdevicelist_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_getfh_avg_latency","title":"node_nfs_getfh_avg_latency","text":"

    Average latency of GETFH operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node getfh.average_latencyUnit: microsecType: averageBase: getfh.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getfh.average_latencyUnit: microsecType: averageBase: getfh.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node getfh.average_latencyUnit: microsecType: averageBase: getfh.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node getfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getfh_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_getfh_total","title":"node_nfs_getfh_total","text":"

    Total number of GETFH operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node getfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node getfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node getfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_latency","title":"node_nfs_latency","text":"

    Average latency of NFSv3 requests. This counter keeps track of the average response time of NFSv3 requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_layoutcommit_avg_latency","title":"node_nfs_layoutcommit_avg_latency","text":"

    Average latency of LAYOUTCOMMIT operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node layoutcommit.average_latencyUnit: microsecType: averageBase: layoutcommit.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node layoutcommit.average_latencyUnit: microsecType: averageBase: layoutcommit.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node layoutcommit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutcommit_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node layoutcommit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutcommit_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_layoutcommit_total","title":"node_nfs_layoutcommit_total","text":"

    Total number of LAYOUTCOMMIT operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node layoutcommit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node layoutcommit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node layoutcommit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node layoutcommit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_layoutget_avg_latency","title":"node_nfs_layoutget_avg_latency","text":"

    Average latency of LAYOUTGET operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node layoutget.average_latencyUnit: microsecType: averageBase: layoutget.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node layoutget.average_latencyUnit: microsecType: averageBase: layoutget.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node layoutget_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutget_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node layoutget_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutget_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_layoutget_total","title":"node_nfs_layoutget_total","text":"

    Total number of LAYOUTGET operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node layoutget.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node layoutget.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node layoutget_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node layoutget_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_layoutreturn_avg_latency","title":"node_nfs_layoutreturn_avg_latency","text":"

    Average latency of LAYOUTRETURN operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node layoutreturn.average_latencyUnit: microsecType: averageBase: layoutreturn.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node layoutreturn.average_latencyUnit: microsecType: averageBase: layoutreturn.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node layoutreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutreturn_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node layoutreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutreturn_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_layoutreturn_total","title":"node_nfs_layoutreturn_total","text":"

    Total number of LAYOUTRETURN operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node layoutreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node layoutreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node layoutreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node layoutreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_link_avg_latency","title":"node_nfs_link_avg_latency","text":"

    Average latency of Link procedure requests. The counter keeps track of the average response time of Link requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_link_total","title":"node_nfs_link_total","text":"

    Total number of Link procedure requests. It is the total number of Link success and Link error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lock_avg_latency","title":"node_nfs_lock_avg_latency","text":"

    Average latency of LOCK operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node lock.average_latencyUnit: microsecType: averageBase: lock.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lock.average_latencyUnit: microsecType: averageBase: lock.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lock.average_latencyUnit: microsecType: averageBase: lock.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lock_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lock_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lock_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lock_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lock_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lock_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lock_total","title":"node_nfs_lock_total","text":"

    Total number of LOCK operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node lock.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lock.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lock.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lock_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lock_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lock_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lockt_avg_latency","title":"node_nfs_lockt_avg_latency","text":"

    Average latency of LOCKT operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node lockt.average_latencyUnit: microsecType: averageBase: lockt.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lockt.average_latencyUnit: microsecType: averageBase: lockt.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lockt.average_latencyUnit: microsecType: averageBase: lockt.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lockt_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lockt_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lockt_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lockt_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lockt_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lockt_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lockt_total","title":"node_nfs_lockt_total","text":"

    Total number of LOCKT operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node lockt.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lockt.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lockt.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lockt_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lockt_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lockt_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_locku_avg_latency","title":"node_nfs_locku_avg_latency","text":"

    Average latency of LOCKU operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node locku.average_latencyUnit: microsecType: averageBase: locku.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node locku.average_latencyUnit: microsecType: averageBase: locku.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node locku.average_latencyUnit: microsecType: averageBase: locku.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node locku_avg_latencyUnit: microsecType: average,no-zero-valuesBase: locku_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node locku_avg_latencyUnit: microsecType: average,no-zero-valuesBase: locku_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node locku_avg_latencyUnit: microsecType: average,no-zero-valuesBase: locku_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_locku_total","title":"node_nfs_locku_total","text":"

    Total number of LOCKU operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node locku.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node locku.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node locku.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node locku_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node locku_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node locku_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lookup_avg_latency","title":"node_nfs_lookup_avg_latency","text":"

    Average latency of LookUp procedure requests. This is the average time taken to respond to a LookUp request.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lookup_total","title":"node_nfs_lookup_total","text":"

    Total number of Lookup procedure requests. It is the total number of lookup success and lookup error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lookupp_avg_latency","title":"node_nfs_lookupp_avg_latency","text":"

    Average latency of LOOKUPP operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node lookupp.average_latencyUnit: microsecType: averageBase: lookupp.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lookupp.average_latencyUnit: microsecType: averageBase: lookupp.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lookupp.average_latencyUnit: microsecType: averageBase: lookupp.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lookupp_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookupp_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lookupp_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookupp_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lookupp_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookupp_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lookupp_total","title":"node_nfs_lookupp_total","text":"

    Total number of LOOKUPP operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node lookupp.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lookupp.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lookupp.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lookupp_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lookupp_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lookupp_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_mkdir_avg_latency","title":"node_nfs_mkdir_avg_latency","text":"

    Average latency of MkDir procedure requests. The counter keeps track of the average response time of MkDir requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node mkdir.average_latencyUnit: microsecType: averageBase: mkdir.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node mkdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: mkdir_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_mkdir_total","title":"node_nfs_mkdir_total","text":"

    Total number of MkDir procedure requests. It is the total number of MkDir success and MkDir error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node mkdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node mkdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_mknod_avg_latency","title":"node_nfs_mknod_avg_latency","text":"

    Average latency of MkNod procedure requests. The counter keeps track of the average response time of MkNod requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node mknod.average_latencyUnit: microsecType: averageBase: mknod.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node mknod_avg_latencyUnit: microsecType: average,no-zero-valuesBase: mknod_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_mknod_total","title":"node_nfs_mknod_total","text":"

    Total number of MkNod procedure requests. It is the total number of MkNod success and MkNod error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node mknod.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node mknod_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_null_avg_latency","title":"node_nfs_null_avg_latency","text":"

    Average latency of Null procedure requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_null_total","title":"node_nfs_null_total","text":"

    Total number of Null procedure requests. It is the total of null success and null error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_nverify_avg_latency","title":"node_nfs_nverify_avg_latency","text":"

    Average latency of NVERIFY operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node nverify.average_latencyUnit: microsecType: averageBase: nverify.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node nverify.average_latencyUnit: microsecType: averageBase: nverify.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node nverify.average_latencyUnit: microsecType: averageBase: nverify.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node nverify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: nverify_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node nverify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: nverify_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node nverify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: nverify_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_nverify_total","title":"node_nfs_nverify_total","text":"

    Total number of NVERIFY operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node nverify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node nverify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node nverify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node nverify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node nverify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node nverify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_open_avg_latency","title":"node_nfs_open_avg_latency","text":"

    Average latency of OPEN operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node open.average_latencyUnit: microsecType: averageBase: open.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node open.average_latencyUnit: microsecType: averageBase: open.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node open.average_latencyUnit: microsecType: averageBase: open.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node open_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node open_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node open_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_open_confirm_avg_latency","title":"node_nfs_open_confirm_avg_latency","text":"

    Average latency of OPEN_CONFIRM procedures.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node open_confirm.average_latencyUnit: microsecType: averageBase: open_confirm.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node open_confirm_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_confirm_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_open_confirm_total","title":"node_nfs_open_confirm_total","text":"

    Total number of OPEN_CONFIRM procedures.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node open_confirm.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node open_confirm_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_open_downgrade_avg_latency","title":"node_nfs_open_downgrade_avg_latency","text":"

    Average latency of OPEN_DOWNGRADE operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node open_downgrade.average_latencyUnit: microsecType: averageBase: open_downgrade.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node open_downgrade.average_latencyUnit: microsecType: averageBase: open_downgrade.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node open_downgrade.average_latencyUnit: microsecType: averageBase: open_downgrade.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node open_downgrade_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_downgrade_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node open_downgrade_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_downgrade_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node open_downgrade_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_downgrade_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_open_downgrade_total","title":"node_nfs_open_downgrade_total","text":"

    Total number of OPEN_DOWNGRADE operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node open_downgrade.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node open_downgrade.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node open_downgrade.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node open_downgrade_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node open_downgrade_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node open_downgrade_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_open_total","title":"node_nfs_open_total","text":"

    Total number of OPEN operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node open.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node open.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node open.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node open_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node open_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node open_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_openattr_avg_latency","title":"node_nfs_openattr_avg_latency","text":"

    Average latency of OPENATTR operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node openattr.average_latencyUnit: microsecType: averageBase: openattr.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node openattr.average_latencyUnit: microsecType: averageBase: openattr.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node openattr.average_latencyUnit: microsecType: averageBase: openattr.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node openattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: openattr_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node openattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: openattr_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node openattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: openattr_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_openattr_total","title":"node_nfs_openattr_total","text":"

    Total number of OPENATTR operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node openattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node openattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node openattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node openattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node openattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node openattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_ops","title":"node_nfs_ops","text":"

    Number of NFS operations per second.
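    A sketch of reading this counter directly from the REST endpoint listed below (api/cluster/counter/tables/system:node). The cluster address and credentials are placeholders, and the /rows sub-resource and response shape reflect the ONTAP counter-tables API as generally documented; details may differ by ONTAP version:

```python
# Hedged sketch: fetch the raw nfs_ops counter from the system:node table.
# CLUSTER and AUTH are placeholders; verify=False is for lab use only.
import requests

CLUSTER = "cluster.example.com"   # hypothetical management LIF
AUTH = ("admin", "password")      # hypothetical credentials

resp = requests.get(
    f"https://{CLUSTER}/api/cluster/counter/tables/system:node/rows",
    params={"fields": "counters"},
    auth=AUTH,
    verify=False,
)
resp.raise_for_status()

# Each row carries a list of counters; pick out nfs_ops per node instance.
for row in resp.json().get("records", []):
    for counter in row.get("counters", []):
        if counter.get("name") == "nfs_ops":
            print(row.get("id"), counter.get("value"))
```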

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node nfs_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node nfs_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_nfs_pathconf_avg_latency","title":"node_nfs_pathconf_avg_latency","text":"

    Average latency of PathConf procedure requests. The counter keeps track of the average response time of PathConf requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node pathconf.average_latencyUnit: microsecType: averageBase: pathconf.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node pathconf_avg_latencyUnit: microsecType: average,no-zero-valuesBase: pathconf_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_pathconf_total","title":"node_nfs_pathconf_total","text":"

    Total number of PathConf procedure requests. It is the total number of PathConf success and PathConf error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node pathconf.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node pathconf_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_putfh_avg_latency","title":"node_nfs_putfh_avg_latency","text":"

    Average latency of PUTFH operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node putfh.average_latencyUnit: noneType: deltaBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node putfh.average_latencyUnit: microsecType: averageBase: putfh.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node putfh.average_latencyUnit: microsecType: averageBase: putfh.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node putfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node putfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node putfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putfh_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_putfh_total","title":"node_nfs_putfh_total","text":"

    Total number of PUTFH operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node putfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node putfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node putfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node putfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node putfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node putfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_putpubfh_avg_latency","title":"node_nfs_putpubfh_avg_latency","text":"

    Average latency of PUTPUBFH operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node putpubfh.average_latencyUnit: microsecType: averageBase: putpubfh.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node putpubfh.average_latencyUnit: microsecType: averageBase: putpubfh.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node putpubfh.average_latencyUnit: microsecType: averageBase: putpubfh.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node putpubfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putpubfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node putpubfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putpubfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node putpubfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putpubfh_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_putpubfh_total","title":"node_nfs_putpubfh_total","text":"

    Total number of PUTPUBFH operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node putpubfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node putpubfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node putpubfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node putpubfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node putpubfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node putpubfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_putrootfh_avg_latency","title":"node_nfs_putrootfh_avg_latency","text":"

    Average latency of PUTROOTFH operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node putrootfh.average_latencyUnit: microsecType: averageBase: putrootfh.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node putrootfh.average_latencyUnit: microsecType: averageBase: putrootfh.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node putrootfh.average_latencyUnit: microsecType: averageBase: putrootfh.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node putrootfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putrootfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node putrootfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putrootfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node putrootfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putrootfh_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_putrootfh_total","title":"node_nfs_putrootfh_total","text":"

    Total number of PUTROOTFH operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node putrootfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node putrootfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node putrootfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node putrootfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node putrootfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node putrootfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_read_avg_latency","title":"node_nfs_read_avg_latency","text":"

    Average latency of Read procedure requests. The counter keeps track of the average response time of Read requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_read_ops","title":"node_nfs_read_ops","text":"

    Total observed NFSv3 read operations per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node nfsv3_read_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_read_symlink_avg_latency","title":"node_nfs_read_symlink_avg_latency","text":"

    Average latency of ReadSymLink procedure requests. The counter keeps track of the average response time of ReadSymLink requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node read_symlink.average_latencyUnit: microsecType: averageBase: read_symlink.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node read_symlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_symlink_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_read_symlink_total","title":"node_nfs_read_symlink_total","text":"

    Total number of ReadSymLink procedure requests. It is the total number of read symlink success and read symlink error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node read_symlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node read_symlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_read_throughput","title":"node_nfs_read_throughput","text":"

    Rate of NFS read data transfers per second.
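    Harvest exports this counter under the name used as this section's heading, so once it has been scraped into Prometheus it can be read back through the Prometheus HTTP API. A sketch that also converts the b_per_sec unit into MiB/s; the Prometheus URL and the node label are illustrative assumptions:

```python
# Hedged sketch: read node_nfs_read_throughput back from Prometheus,
# assuming Harvest is exporting to a Prometheus server at PROM_URL.
import requests

PROM_URL = "http://prometheus.example.com:9090"      # hypothetical
query = 'node_nfs_read_throughput{node="node-01"}'   # hypothetical node label

resp = requests.get(f"{PROM_URL}/api/v1/query", params={"query": query})
resp.raise_for_status()

for result in resp.json()["data"]["result"]:
    timestamp, value = result["value"]
    # b_per_sec -> MiB/s for readability
    print(result["metric"], f"{float(value) / 2**20:.2f} MiB/s")
```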

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node total.read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node total.read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node total.read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node nfsv3_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node nfs41_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node nfs42_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node nfs4_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_read_total","title":"node_nfs_read_total","text":"

    Total number of Read procedure requests. It is the total number of read success and read error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_readdir_avg_latency","title":"node_nfs_readdir_avg_latency","text":"

    Average latency of ReadDir procedure requests. The counter keeps track of the average response time of ReadDir requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_readdir_total","title":"node_nfs_readdir_total","text":"

    Total number of ReadDir procedure requests. It is the total number of ReadDir success and ReadDir error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_readdirplus_avg_latency","title":"node_nfs_readdirplus_avg_latency","text":"

    Average latency of ReadDirPlus procedure requests. The counter keeps track of the average response time of ReadDirPlus requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node readdirplus.average_latencyUnit: microsecType: averageBase: readdirplus.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node readdirplus_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdirplus_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_readdirplus_total","title":"node_nfs_readdirplus_total","text":"

    Total number of ReadDirPlus procedure requests. It is the total number of ReadDirPlus success and ReadDirPlus error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node readdirplus.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node readdirplus_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_readlink_avg_latency","title":"node_nfs_readlink_avg_latency","text":"

    Average latency of READLINK operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node readlink.average_latencyUnit: microsecType: averageBase: readlink.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node readlink.average_latencyUnit: microsecType: averageBase: readlink.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node readlink.average_latencyUnit: microsecType: averageBase: readlink.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node readlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readlink_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node readlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readlink_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node readlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readlink_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_readlink_total","title":"node_nfs_readlink_total","text":"

    Total number of READLINK operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node readlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node readlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node readlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node readlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node readlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node readlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_reclaim_complete_avg_latency","title":"node_nfs_reclaim_complete_avg_latency","text":"

    Average latency of RECLAIM_COMPLETE operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node reclaim_complete.average_latencyUnit: microsecType: averageBase: reclaim_complete.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node reclaim_complete.average_latencyUnit: microsecType: averageBase: reclaim_complete.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node reclaim_complete_avg_latencyUnit: microsecType: average,no-zero-valuesBase: reclaim_complete_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node reclaim_complete_avg_latencyUnit: microsecType: average,no-zero-valuesBase: reclaim_complete_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_reclaim_complete_total","title":"node_nfs_reclaim_complete_total","text":"

    Total number of RECLAIM_COMPLETE operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node reclaim_complete.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node reclaim_complete.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node reclaim_complete_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node reclaim_complete_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_release_lock_owner_avg_latency","title":"node_nfs_release_lock_owner_avg_latency","text":"

    Average latency of RELEASE_LOCKOWNER procedures.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node release_lock_owner.average_latencyUnit: microsecType: averageBase: release_lock_owner.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node release_lock_owner_avg_latencyUnit: microsecType: average,no-zero-valuesBase: release_lock_owner_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_release_lock_owner_total","title":"node_nfs_release_lock_owner_total","text":"

    Total number of RELEASE_LOCKOWNER procedures.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node release_lock_owner.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node release_lock_owner_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_remove_avg_latency","title":"node_nfs_remove_avg_latency","text":"

    Average latency of Remove procedure requests. The counter keeps track of the average response time of Remove requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_remove_total","title":"node_nfs_remove_total","text":"

    Total number of Remove procedure requests. It is the total number of Remove success and Remove error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_rename_avg_latency","title":"node_nfs_rename_avg_latency","text":"

    Average latency of Rename procedure requests. The counter keeps track of the average response time of Rename requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_rename_total","title":"node_nfs_rename_total","text":"

    Total number of Rename procedure requests. It is the total number of Rename success and Rename error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_renew_avg_latency","title":"node_nfs_renew_avg_latency","text":"

    Average latency of RENEW procedures.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node renew.average_latencyUnit: microsecType: averageBase: renew.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node renew_avg_latencyUnit: microsecType: average,no-zero-valuesBase: renew_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_renew_total","title":"node_nfs_renew_total","text":"

    Total number of RENEW procedures.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node renew.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node renew_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_restorefh_avg_latency","title":"node_nfs_restorefh_avg_latency","text":"

    Average latency of RESTOREFH operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node restorefh.average_latencyUnit: microsecType: averageBase: restorefh.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node restorefh.average_latencyUnit: microsecType: averageBase: restorefh.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node restorefh.average_latencyUnit: microsecType: averageBase: restorefh.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node restorefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: restorefh_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node restorefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: restorefh_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node restorefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: restorefh_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_restorefh_total","title":"node_nfs_restorefh_total","text":"

    Total number of RESTOREFH operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node restorefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node restorefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node restorefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node restorefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node restorefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node restorefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_rmdir_avg_latency","title":"node_nfs_rmdir_avg_latency","text":"

    Average latency of RmDir procedure requests. The counter keeps track of the average response time of RmDir requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node rmdir.average_latencyUnit: microsecType: averageBase: rmdir.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node rmdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rmdir_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_rmdir_total","title":"node_nfs_rmdir_total","text":"

    Total number of RmDir procedure requests. It is the total number of RmDir success and RmDir error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node rmdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node rmdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_savefh_avg_latency","title":"node_nfs_savefh_avg_latency","text":"

    Average latency of SAVEFH operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node savefh.average_latencyUnit: microsecType: averageBase: savefh.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node savefh.average_latencyUnit: microsecType: averageBase: savefh.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node savefh.average_latencyUnit: microsecType: averageBase: savefh.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node savefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: savefh_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node savefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: savefh_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node savefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: savefh_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_savefh_total","title":"node_nfs_savefh_total","text":"

    Total number of SAVEFH operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node savefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node savefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node savefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node savefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node savefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node savefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_secinfo_avg_latency","title":"node_nfs_secinfo_avg_latency","text":"

    Average latency of SECINFO operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node secinfo.average_latencyUnit: microsecType: averageBase: secinfo.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node secinfo.average_latencyUnit: microsecType: averageBase: secinfo.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node secinfo.average_latencyUnit: microsecType: averageBase: secinfo.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node secinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node secinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node secinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_secinfo_no_name_avg_latency","title":"node_nfs_secinfo_no_name_avg_latency","text":"

    Average latency of SECINFO_NO_NAME operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node secinfo_no_name.average_latencyUnit: microsecType: averageBase: secinfo_no_name.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node secinfo_no_name.average_latencyUnit: microsecType: averageBase: secinfo_no_name.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node secinfo_no_name_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_no_name_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node secinfo_no_name_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_no_name_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_secinfo_no_name_total","title":"node_nfs_secinfo_no_name_total","text":"

    Total number of SECINFO_NO_NAME operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node secinfo_no_name.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node secinfo_no_name.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node secinfo_no_name_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node secinfo_no_name_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_secinfo_total","title":"node_nfs_secinfo_total","text":"

    Total number of SECINFO operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node secinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node secinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node secinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node secinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node secinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node secinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_sequence_avg_latency","title":"node_nfs_sequence_avg_latency","text":"

    Average latency of SEQUENCE operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node sequence.average_latencyUnit: microsecType: averageBase: sequence.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node sequence.average_latencyUnit: microsecType: averageBase: sequence.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node sequence_avg_latencyUnit: microsecType: average,no-zero-valuesBase: sequence_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node sequence_avg_latencyUnit: microsecType: average,no-zero-valuesBase: sequence_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_sequence_total","title":"node_nfs_sequence_total","text":"

    Total number of SEQUENCE operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node sequence.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node sequence.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node sequence_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node sequence_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_set_ssv_avg_latency","title":"node_nfs_set_ssv_avg_latency","text":"

    Average latency of SET_SSV operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node set_ssv.average_latencyUnit: microsecType: averageBase: set_ssv.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node set_ssv.average_latencyUnit: microsecType: averageBase: set_ssv.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node set_ssv_avg_latencyUnit: microsecType: average,no-zero-valuesBase: set_ssv_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node set_ssv_avg_latencyUnit: microsecType: average,no-zero-valuesBase: set_ssv_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_set_ssv_total","title":"node_nfs_set_ssv_total","text":"

    Total number of SET_SSV operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node set_ssv.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node set_ssv.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node set_ssv_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node set_ssv_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_setattr_avg_latency","title":"node_nfs_setattr_avg_latency","text":"

    Average latency of SetAttr procedure requests. The counter keeps track of the average response time of SetAttr requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_setattr_total","title":"node_nfs_setattr_total","text":"

    Total number of Setattr procedure requests. It is the total number of Setattr success and Setattr error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_setclientid_avg_latency","title":"node_nfs_setclientid_avg_latency","text":"

    Average latency of SETCLIENTID procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node setclientid.average_latencyUnit: microsecType: averageBase: setclientid.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node setclientid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setclientid_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_setclientid_confirm_avg_latency","title":"node_nfs_setclientid_confirm_avg_latency","text":"

    Average latency of SETCLIENTID_CONFIRM procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node setclientid_confirm.average_latencyUnit: microsecType: averageBase: setclientid_confirm.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node setclientid_confirm_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setclientid_confirm_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_setclientid_confirm_total","title":"node_nfs_setclientid_confirm_total","text":"

    Total number of SETCLIENTID_CONFIRM procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node setclientid_confirm.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node setclientid_confirm_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_setclientid_total","title":"node_nfs_setclientid_total","text":"

    Total number of SETCLIENTID procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node setclientid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node setclientid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_symlink_avg_latency","title":"node_nfs_symlink_avg_latency","text":"

    Average latency of SymLink procedure requests. The counter keeps track of the average response time of SymLink requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node symlink.average_latencyUnit: microsecType: averageBase: symlink.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node symlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: symlink_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_symlink_total","title":"node_nfs_symlink_total","text":"

    Total number of SymLink procedure requests. It is the total number of SymLink success and SymLink error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node symlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node symlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_test_stateid_avg_latency","title":"node_nfs_test_stateid_avg_latency","text":"
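
    The ZAPI rows in these tables are gathered through the perf-object-get-instances call. A hedged sketch of issuing that call directly over HTTPS follows; the servlet path and the <instances>/<counters> element names are assumptions to verify against the ONTAPI documentation for your release, and the host, node name, and credentials are placeholders:

```python
# Hedged sketch of calling perf-object-get-instances (the ZAPI shown above) over
# raw HTTPS. The servlet path and the <instances>/<counters> element names are
# assumptions to verify against your ONTAPI docs; host, node name, and
# credentials are placeholders.
import requests

ZAPI_URL = "https://cluster.example.com/servlets/netapp.servlets.admin.XMLrequest_filer"
BODY = """<?xml version="1.0" encoding="UTF-8"?>
<netapp xmlns="http://www.netapp.com/filer/admin" version="1.31">
  <perf-object-get-instances>
    <objectname>nfsv3:node</objectname>
    <instances><instance>node-01</instance></instances>
    <counters><counter>symlink_total</counter></counters>
  </perf-object-get-instances>
</netapp>"""

resp = requests.post(
    ZAPI_URL,
    data=BODY,
    headers={"Content-Type": "text/xml"},
    auth=("admin", "password"),  # placeholder credentials
    verify=False,                # lab-only; verify certificates in production
)
resp.raise_for_status()
print(resp.text)  # raw XML; parse with xml.etree.ElementTree as needed
```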

    Average latency of TEST_STATEID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node test_stateid.average_latencyUnit: microsecType: averageBase: test_stateid.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node test_stateid.average_latencyUnit: microsecType: averageBase: test_stateid.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node test_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: test_stateid_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node test_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: test_stateid_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_test_stateid_total","title":"node_nfs_test_stateid_total","text":"

    Total number of TEST_STATEID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node test_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node test_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node test_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node test_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_throughput","title":"node_nfs_throughput","text":"

    Rate of NFSv3 data transfers per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node total.throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node total.throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node total.throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node nfsv3_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node nfs41_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node nfs42_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node nfs4_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_total_ops","title":"node_nfs_total_ops","text":"

    Total number of NFSv3 procedure requests per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node nfsv3_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node total_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node total_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node total_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_verify_avg_latency","title":"node_nfs_verify_avg_latency","text":"
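
    The REST rows in these tables come from ONTAP's counter tables under api/cluster/counter/tables. A hedged sketch of reading one table directly; the /rows sub-resource and the records/counters response shape follow the public counter API as understood here and should be verified against your ONTAP release, and the host and credentials are placeholders:

```python
# Hedged sketch: reading one of the REST counter tables listed above
# (api/cluster/counter/tables/svm_nfs_v3:node) straight from ONTAP. The /rows
# sub-resource and the records/counters response shape are assumptions to
# verify against your release; host and credentials are placeholders.
import requests

resp = requests.get(
    "https://cluster.example.com/api/cluster/counter/tables/svm_nfs_v3:node/rows",
    params={"fields": "counters"},
    auth=("admin", "password"),  # placeholder credentials
    verify=False,                # lab-only; verify certificates in production
)
resp.raise_for_status()

for row in resp.json().get("records", []):
    counters = {c["name"]: c.get("value") for c in row.get("counters", [])}
    print(row.get("id"), counters.get("ops"), counters.get("throughput"))
```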

    Average latency of VERIFY operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node verify.average_latencyUnit: microsecType: averageBase: verify.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node verify.average_latencyUnit: microsecType: averageBase: verify.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node verify.average_latencyUnit: microsecType: averageBase: verify.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node verify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: verify_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node verify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: verify_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node verify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: verify_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_verify_total","title":"node_nfs_verify_total","text":"

    Total number of VERIFY operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node verify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node verify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node verify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node verify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node verify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node verify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_want_delegation_avg_latency","title":"node_nfs_want_delegation_avg_latency","text":"

    Average latency of WANT_DELEGATION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node want_delegation.average_latencyUnit: microsecType: averageBase: want_delegation.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node want_delegation.average_latencyUnit: microsecType: averageBase: want_delegation.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node want_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: want_delegation_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node want_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: want_delegation_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_want_delegation_total","title":"node_nfs_want_delegation_total","text":"

    Total number of WANT_DELEGATION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node want_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node want_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node want_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node want_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_write_avg_latency","title":"node_nfs_write_avg_latency","text":"

    Average latency of Write procedure requests. The counter keeps track of the average response time of Write requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_write_ops","title":"node_nfs_write_ops","text":"

    Total observed NFSv3 write operations per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node nfsv3_write_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_write_throughput","title":"node_nfs_write_throughput","text":"

    Rate of NFSv3 write data transfers per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node total.write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node total.write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node total.write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node nfsv3_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node nfs41_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node nfs42_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node nfs4_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_write_total","title":"node_nfs_write_total","text":"

    Total number of Write procedure requests. It is the total number of write success and write error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nvme_fc_data_recv","title":"node_nvme_fc_data_recv","text":"

    NVMe/FC kilobytes (KB) received per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node nvme_fc_data_receivedUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml"},{"location":"ontap-metrics/#node_nvme_fc_data_sent","title":"node_nvme_fc_data_sent","text":"

    NVMe/FC kilobytes (KB) sent per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node nvme_fc_data_sentUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml"},{"location":"ontap-metrics/#node_nvme_fc_ops","title":"node_nvme_fc_ops","text":"

    NVMe/FC operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node nvme_fc_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml"},{"location":"ontap-metrics/#node_nvmf_data_recv","title":"node_nvmf_data_recv","text":"

    NVMe/FC kilobytes (KB) received per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node nvme_fc_data_received, 1Unit: Type: Base: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node nvmf_data_recvUnit: Type: Base: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_nvmf_data_sent","title":"node_nvmf_data_sent","text":"

    NVMe/FC kilobytes (KB) sent per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node nvme_fc_data_sent, 1Unit: Type: Base: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node nvmf_data_sentUnit: Type: Base: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_nvmf_ops","title":"node_nvmf_ops","text":"

    NVMe/FC operations per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node nvme_fc_ops, 1Unit: Type: Base: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node nvmf_opsUnit: Type: Base: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_ssd_data_read","title":"node_ssd_data_read","text":"

    Number of SSD Disk kilobytes (KB) read per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node ssd_data_readUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node ssd_data_readUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_ssd_data_written","title":"node_ssd_data_written","text":"

    Number of SSD Disk kilobytes (KB) written per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node ssd_data_writtenUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node ssd_data_writtenUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_total_data","title":"node_total_data","text":"

    Total throughput in bytes

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_total_latency","title":"node_total_latency","text":"

    Average latency for all operations in the system in microseconds

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node total_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node total_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_total_ops","title":"node_total_ops","text":"

    Total number of operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/system:node total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_uptime","title":"node_uptime","text":"

    The total time, in seconds, that the node has been up.

    API Endpoint Metric Template REST api/cluster/nodes uptime conf/rest/9.12.0/node.yaml ZAPI system-node-get-iter node-details-info.node-uptime conf/zapi/cdot/9.8.0/node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_other_latency","title":"node_vol_cifs_other_latency","text":"
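
    Unlike the performance counters above, node_uptime is read from a configuration endpoint. A brief sketch of the equivalent direct query (host and credentials are placeholders):

```python
# Brief sketch of querying the REST endpoint shown above (api/cluster/nodes)
# for the uptime field. Host and credentials are placeholders.
import requests

resp = requests.get(
    "https://cluster.example.com/api/cluster/nodes",
    params={"fields": "name,uptime"},
    auth=("admin", "password"),  # placeholder credentials
    verify=False,                # lab-only; verify certificates in production
)
resp.raise_for_status()
for node in resp.json().get("records", []):
    print(node["name"], node.get("uptime"), "seconds up")
```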

    Average time for the WAFL filesystem to process other CIFS operations to the volume; not including CIFS protocol request processing or network communication time which will also be included in client observed CIFS request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.other_latencyUnit: microsecType: averageBase: cifs.other_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_other_latencyUnit: microsecType: averageBase: cifs_other_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_other_ops","title":"node_vol_cifs_other_ops","text":"
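
    As the description above notes, these volume-level counters measure only the WAFL portion of a request; CIFS protocol processing and network time are added on top of that in what the client observes. A toy illustration with hypothetical numbers:

```python
# Toy illustration (hypothetical numbers): the volume-level latency counters
# above cover only the WAFL portion of a CIFS request. Client-observed latency
# also includes protocol processing and network time, so it is always larger.
wafl_latency_us = 180.0               # what node_vol_cifs_* latency counters report
cifs_protocol_processing_us = 40.0    # hypothetical protocol-layer time
network_round_trip_us = 250.0         # hypothetical client <-> SVM network time

client_observed_us = wafl_latency_us + cifs_protocol_processing_us + network_round_trip_us
print(client_observed_us)  # -> 470.0 us seen by the client; only 180.0 us is counted here
```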

    Number of other CIFS operations per second to the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_read_data","title":"node_vol_cifs_read_data","text":"

    Bytes read per second via CIFS

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_read_latency","title":"node_vol_cifs_read_latency","text":"

    Average time for the WAFL filesystem to process CIFS read requests to the volume; not including CIFS protocol request processing or network communication time which will also be included in client observed CIFS request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.read_latencyUnit: microsecType: averageBase: cifs.read_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_read_latencyUnit: microsecType: averageBase: cifs_read_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_read_ops","title":"node_vol_cifs_read_ops","text":"

    Number of CIFS read operations per second from the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_write_data","title":"node_vol_cifs_write_data","text":"

    Bytes written per second via CIFS

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_write_latency","title":"node_vol_cifs_write_latency","text":"

    Average time for the WAFL filesystem to process CIFS write requests to the volume; not including CIFS protocol request processing or network communication time which will also be included in client observed CIFS request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.write_latencyUnit: microsecType: averageBase: cifs.write_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_write_latencyUnit: microsecType: averageBase: cifs_write_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_write_ops","title":"node_vol_cifs_write_ops","text":"

    Number of CIFS write operations per second to the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_other_latency","title":"node_vol_fcp_other_latency","text":"

    Average time for the WAFL filesystem to process other FCP protocol operations to the volume; not including FCP protocol request processing or network communication time which will also be included in client observed FCP protocol request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.other_latencyUnit: microsecType: averageBase: fcp.other_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_other_latencyUnit: microsecType: averageBase: fcp_other_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_other_ops","title":"node_vol_fcp_other_ops","text":"

    Number of other block protocol operations per second to the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_read_data","title":"node_vol_fcp_read_data","text":"

    Bytes read per second via block protocol

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_read_latency","title":"node_vol_fcp_read_latency","text":"

    Average time for the WAFL filesystem to process FCP protocol read operations to the volume; not including FCP protocol request processing or network communication time which will also be included in client observed FCP protocol request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.read_latencyUnit: microsecType: averageBase: fcp.read_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_read_latencyUnit: microsecType: averageBase: fcp_read_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_read_ops","title":"node_vol_fcp_read_ops","text":"

    Number of block protocol read operations per second from the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_write_data","title":"node_vol_fcp_write_data","text":"

    Bytes written per second via block protocol

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_write_latency","title":"node_vol_fcp_write_latency","text":"

    Average time for the WAFL filesystem to process FCP protocol write operations to the volume; not including FCP protocol request processing or network communication time which will also be included in client observed FCP protocol request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.write_latencyUnit: microsecType: averageBase: fcp.write_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_write_latencyUnit: microsecType: averageBase: fcp_write_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_write_ops","title":"node_vol_fcp_write_ops","text":"

    Number of block protocol write operations per second to the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_other_latency","title":"node_vol_iscsi_other_latency","text":"

    Average time for the WAFL filesystem to process other iSCSI protocol operations to the volume; not including iSCSI protocol request processing or network communication time which will also be included in client observed iSCSI protocol request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.other_latencyUnit: microsecType: averageBase: iscsi.other_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_other_latencyUnit: microsecType: averageBase: iscsi_other_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_other_ops","title":"node_vol_iscsi_other_ops","text":"

    Number of other block protocol operations per second to the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_read_data","title":"node_vol_iscsi_read_data","text":"

    Bytes read per second via block protocol

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_read_latency","title":"node_vol_iscsi_read_latency","text":"

    Average time for the WAFL filesystem to process iSCSI protocol read operations to the volume; not including iSCSI protocol request processing or network communication time which will also be included in client observed iSCSI protocol request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.read_latencyUnit: microsecType: averageBase: iscsi.read_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_read_latencyUnit: microsecType: averageBase: iscsi_read_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_read_ops","title":"node_vol_iscsi_read_ops","text":"

    Number of block protocol read operations per second from the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_write_data","title":"node_vol_iscsi_write_data","text":"

    Bytes written per second via block protocol

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_write_latency","title":"node_vol_iscsi_write_latency","text":"

    Average time for the WAFL filesystem to process iSCSI protocol write operations to the volume; not including iSCSI protocol request processing or network communication time which will also be included in client observed iSCSI request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.write_latencyUnit: microsecType: averageBase: iscsi.write_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_write_latencyUnit: microsecType: averageBase: iscsi_write_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_write_ops","title":"node_vol_iscsi_write_ops","text":"

    Number of block protocol write operations per second to the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_other_latency","title":"node_vol_nfs_other_latency","text":"

    Average time for the WAFL filesystem to process other NFS operations to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.other_latencyUnit: microsecType: averageBase: nfs.other_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_other_latencyUnit: microsecType: averageBase: nfs_other_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_other_ops","title":"node_vol_nfs_other_ops","text":"

    Number of other NFS operations per second to the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_read_data","title":"node_vol_nfs_read_data","text":"

    Bytes read per second via NFS

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_read_latency","title":"node_vol_nfs_read_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol read requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.read_latencyUnit: microsecType: averageBase: nfs.read_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_read_latencyUnit: microsecType: averageBase: nfs_read_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_read_ops","title":"node_vol_nfs_read_ops","text":"

    Number of NFS read operations per second from the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_write_data","title":"node_vol_nfs_write_data","text":"

    Bytes written per second via NFS

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_write_latency","title":"node_vol_nfs_write_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol write requests to the volume; not including NFS protocol request processing or network communication time, which will also be included in client observed NFS request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.write_latencyUnit: microsecType: averageBase: nfs.write_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_write_latencyUnit: microsecType: averageBase: nfs_write_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_write_ops","title":"node_vol_nfs_write_ops","text":"

    Number of NFS write operations per second to the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_read_latency","title":"node_vol_read_latency","text":"

    Average latency in microseconds for the WAFL filesystem to process read requests to the volume; not including request processing or network communication time

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node read_latencyUnit: microsecType: averageBase: total_read_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_write_latency","title":"node_vol_write_latency","text":"

    Average latency in microseconds for the WAFL filesystem to process write requests to the volume; not including request processing or network communication time

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:node write_latencyUnit: microsecType: averageBase: total_write_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_volume_avg_latency","title":"node_volume_avg_latency","text":"

    Average latency in microseconds for the WAFL filesystem to process all the operations on the volume; not including request processing or network communication time. node_volume_avg_latency is volume_avg_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume average_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume avg_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_access_latency","title":"node_volume_nfs_access_latency","text":"
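
    Per-volume latency averages cannot simply be summed when rolling them up to a node. An ops-weighted mean is one reasonable aggregation; whether Harvest's aggregation matches this exactly is an assumption here, and the sample volumes are hypothetical:

```python
# Illustrative sketch of rolling per-volume averages up to the node level using
# an ops-weighted mean. Whether this matches Harvest's aggregator exactly is an
# assumption; the sample volumes below are hypothetical.

volumes = [
    # (node, avg_latency_us, total_ops_per_sec)
    ("node-01", 250.0, 1_200.0),
    ("node-01", 900.0,   300.0),
    ("node-02", 120.0, 2_000.0),
]

node_weighted_latency = {}
node_ops = {}
for node, latency, ops in volumes:
    node_weighted_latency[node] = node_weighted_latency.get(node, 0.0) + latency * ops
    node_ops[node] = node_ops.get(node, 0.0) + ops

for node in node_ops:
    print(node, node_weighted_latency[node] / node_ops[node])  # node_volume_avg_latency
# node-01 -> (250*1200 + 900*300) / 1500 = 380.0 us
# node-02 -> 120.0 us
```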

    Average time for the WAFL filesystem to process NFS protocol access requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_access_latency is volume_nfs_access_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.access_latencyUnit: microsecType: averageBase: nfs.access_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_access_latencyUnit: microsecType: averageBase: nfs_access_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_access_ops","title":"node_volume_nfs_access_ops","text":"

    Number of NFS accesses per second to the volume. node_volume_nfs_access_ops is volume_nfs_access_ops aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.access_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_access_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_getattr_latency","title":"node_volume_nfs_getattr_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol getattr requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_getattr_latency is volume_nfs_getattr_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.getattr_latencyUnit: microsecType: averageBase: nfs.getattr_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_getattr_latencyUnit: microsecType: averageBase: nfs_getattr_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_getattr_ops","title":"node_volume_nfs_getattr_ops","text":"

    Number of NFS getattr per second to the volume. node_volume_nfs_getattr_ops is volume_nfs_getattr_ops aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.getattr_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_getattr_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_lookup_latency","title":"node_volume_nfs_lookup_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol lookup requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_lookup_latency is volume_nfs_lookup_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.lookup_latencyUnit: microsecType: averageBase: nfs.lookup_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_lookup_latencyUnit: microsecType: averageBase: nfs_lookup_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_lookup_ops","title":"node_volume_nfs_lookup_ops","text":"

    Number of NFS lookups per second to the volume. node_volume_nfs_lookup_ops is volume_nfs_lookup_ops aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.lookup_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_lookup_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_other_latency","title":"node_volume_nfs_other_latency","text":"

    Average time for the WAFL filesystem to process other NFS operations to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_other_latency is volume_nfs_other_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.other_latencyUnit: microsecType: averageBase: nfs.other_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_other_latencyUnit: microsecType: averageBase: nfs_other_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_other_ops","title":"node_volume_nfs_other_ops","text":"

    Number of other NFS operations per second to the volume. node_volume_nfs_other_ops is volume_nfs_other_ops aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_punch_hole_latency","title":"node_volume_nfs_punch_hole_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol hole-punch requests to the volume. node_volume_nfs_punch_hole_latency is volume_nfs_punch_hole_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.punch_hole_latencyUnit: microsecType: averageBase: nfs.punch_hole_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_punch_hole_latencyUnit: microsecType: averageBase: nfs_punch_hole_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_punch_hole_ops","title":"node_volume_nfs_punch_hole_ops","text":"

    Number of NFS hole-punch requests per second to the volume. node_volume_nfs_punch_hole_ops is volume_nfs_punch_hole_ops aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.punch_hole_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_punch_hole_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_read_latency","title":"node_volume_nfs_read_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol read requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_read_latency is volume_nfs_read_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.read_latencyUnit: microsecType: averageBase: nfs.read_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_read_latencyUnit: microsecType: averageBase: nfs_read_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_read_ops","title":"node_volume_nfs_read_ops","text":"

    Number of NFS read operations per second from the volume. node_volume_nfs_read_ops is volume_nfs_read_ops aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_setattr_latency","title":"node_volume_nfs_setattr_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol setattr requests to the volume. node_volume_nfs_setattr_latency is volume_nfs_setattr_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.setattr_latencyUnit: microsecType: averageBase: nfs.setattr_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_setattr_latencyUnit: microsecType: averageBase: nfs_setattr_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_setattr_ops","title":"node_volume_nfs_setattr_ops","text":"

    Number of NFS setattr requests per second to the volume. node_volume_nfs_setattr_ops is volume_nfs_setattr_ops aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.setattr_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_setattr_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_total_ops","title":"node_volume_nfs_total_ops","text":"

    Number of total NFS operations per second to the volume. node_volume_nfs_total_ops is volume_nfs_total_ops aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_write_latency","title":"node_volume_nfs_write_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol write requests to the volume; not including NFS protocol request processing or network communication time, which will also be included in client observed NFS request latency. node_volume_nfs_write_latency is volume_nfs_write_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.write_latencyUnit: microsecType: averageBase: nfs.write_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_write_latencyUnit: microsecType: averageBase: nfs_write_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_write_ops","title":"node_volume_nfs_write_ops","text":"

    Number of NFS write operations per second to the volume. node_volume_nfs_write_ops is volume_nfs_write_ops aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_other_latency","title":"node_volume_other_latency","text":"

    Average latency in microseconds for the WAFL filesystem to process other operations to the volume; not including request processing or network communication time. node_volume_other_latency is volume_other_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume other_latencyUnit: microsecType: averageBase: total_other_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_other_ops","title":"node_volume_other_ops","text":"

    Number of other operations per second to the volume. node_volume_other_ops is volume_other_ops aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume total_other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_read_data","title":"node_volume_read_data","text":"

    Bytes read per second. node_volume_read_data is volume_read_data aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume bytes_readUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_read_latency","title":"node_volume_read_latency","text":"

    Average latency in microseconds for the WAFL filesystem to process read requests to the volume; not including request processing or network communication time. node_volume_read_latency is volume_read_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume read_latencyUnit: microsecType: averageBase: total_read_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_read_ops","title":"node_volume_read_ops","text":"

    Number of read operations per second from the volume. node_volume_read_ops is volume_read_ops aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume total_read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_total_ops","title":"node_volume_total_ops","text":"

    Number of operations per second serviced by the volume. node_volume_total_ops is volume_total_ops aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_write_data","title":"node_volume_write_data","text":"

    Bytes written per second. node_volume_write_data is volume_write_data aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume bytes_writtenUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_write_latency","title":"node_volume_write_latency","text":"

    Average latency in microseconds for the WAFL filesystem to process write requests to the volume; not including request processing or network communication time. node_volume_write_latency is volume_write_latency aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume write_latencyUnit: microsecType: averageBase: total_write_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_write_ops","title":"node_volume_write_ops","text":"

    Number of write operations per second to the volume. node_volume_write_ops is volume_write_ops aggregated by node.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume total_write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#nvme_lif_avg_latency","title":"nvme_lif_avg_latency","text":"

    Average latency for NVMF operations
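
    The REST path in the table below can be queried directly. A minimal sketch using the requests library; the cluster address and credentials are placeholders, certificate verification is skipped only for brevity, and the exact response layout depends on the ONTAP version:

```python
import requests

CLUSTER = "cluster.example.com"  # placeholder management LIF
AUTH = ("admin", "password")     # placeholder credentials

# Endpoint taken from the table below
url = f"https://{CLUSTER}/api/cluster/counter/tables/nvmf_lif"
resp = requests.get(url, auth=AUTH, verify=False, timeout=30)
resp.raise_for_status()
print(resp.json())
```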

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif average_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif avg_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_avg_other_latency","title":"nvme_lif_avg_other_latency","text":"

    Average latency for operations other than read, write, compare or compare-and-write.

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif average_other_latencyUnit: microsecType: averageBase: other_ops conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif avg_other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_avg_read_latency","title":"nvme_lif_avg_read_latency","text":"

    Average latency for read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif average_read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif avg_read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_avg_write_latency","title":"nvme_lif_avg_write_latency","text":"

    Average latency for write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif average_write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif avg_write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_other_ops","title":"nvme_lif_other_ops","text":"

    Number of operations that are not read, write, compare or compare-and-write.

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_read_data","title":"nvme_lif_read_data","text":"

    Amount of data read from the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_read_ops","title":"nvme_lif_read_ops","text":"

    Number of read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_total_ops","title":"nvme_lif_total_ops","text":"

    Total number of operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_write_data","title":"nvme_lif_write_data","text":"

    Amount of data written to the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_write_ops","title":"nvme_lif_write_ops","text":"

    Number of write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_avg_latency","title":"nvmf_rdma_port_avg_latency","text":"

    Average latency for NVMF operations

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port average_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port avg_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_avg_other_latency","title":"nvmf_rdma_port_avg_other_latency","text":"

    Average latency for operations other than read, write, compare or compare-and-write

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port average_other_latencyUnit: microsecType: averageBase: other_ops conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port avg_other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_avg_read_latency","title":"nvmf_rdma_port_avg_read_latency","text":"

    Average latency for read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port average_read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port avg_read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_avg_write_latency","title":"nvmf_rdma_port_avg_write_latency","text":"

    Average latency for write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port average_write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port avg_write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_other_ops","title":"nvmf_rdma_port_other_ops","text":"

    Number of operations that are not read, write, compare or compare-and-write.

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port other_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_read_data","title":"nvmf_rdma_port_read_data","text":"

    Amount of data read from the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port read_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_read_ops","title":"nvmf_rdma_port_read_ops","text":"

    Number of read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port read_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_total_data","title":"nvmf_rdma_port_total_data","text":"

    Amount of NVMF traffic to and from the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port total_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_total_ops","title":"nvmf_rdma_port_total_ops","text":"

    Total number of operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port total_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_write_data","title":"nvmf_rdma_port_write_data","text":"

    Amount of data written to the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port write_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_write_ops","title":"nvmf_rdma_port_write_ops","text":"

    Number of write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port write_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_avg_latency","title":"nvmf_tcp_port_avg_latency","text":"

    Average latency for NVMF operations

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port average_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port avg_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_avg_other_latency","title":"nvmf_tcp_port_avg_other_latency","text":"

    Average latency for operations other than read, write, compare or compare-and-write

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port average_other_latencyUnit: microsecType: averageBase: other_ops conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port avg_other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_avg_read_latency","title":"nvmf_tcp_port_avg_read_latency","text":"

    Average latency for read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port average_read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port avg_read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_avg_write_latency","title":"nvmf_tcp_port_avg_write_latency","text":"

    Average latency for write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port average_write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port avg_write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_other_ops","title":"nvmf_tcp_port_other_ops","text":"

    Number of operations that are not read, write, compare or compare-and-write.

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port other_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_read_data","title":"nvmf_tcp_port_read_data","text":"

    Amount of data read from the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port read_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_read_ops","title":"nvmf_tcp_port_read_ops","text":"

    Number of read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port read_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_total_data","title":"nvmf_tcp_port_total_data","text":"

    Amount of NVMF traffic to and from the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port total_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_total_ops","title":"nvmf_tcp_port_total_ops","text":"

    Total number of operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port total_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_write_data","title":"nvmf_tcp_port_write_data","text":"

    Amount of data written to the storage system

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port write_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_write_ops","title":"nvmf_tcp_port_write_ops","text":"

    Number of write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port write_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#ontaps3_logical_used_size","title":"ontaps3_logical_used_size","text":"

    Specifies the bucket logical used size up to this point. This field cannot be specified using a POST or PATCH method.

    API Endpoint Metric Template REST api/protocols/s3/buckets logical_used_size conf/rest/9.7.0/ontap_s3.yaml"},{"location":"ontap-metrics/#ontaps3_object_count","title":"ontaps3_object_count","text":"API Endpoint Metric Template REST api/private/cli/vserver/object-store-server/bucket object_count conf/rest/9.7.0/ontap_s3.yaml"},{"location":"ontap-metrics/#ontaps3_size","title":"ontaps3_size","text":"

    Specifies the bucket size in bytes; ranges from 190MB to 62PB.
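
    Both ontaps3_logical_used_size and ontaps3_size come from the same api/protocols/s3/buckets endpoint, so one request can return them together. A hedged sketch with placeholder cluster and credentials, assuming the usual ONTAP REST fields query parameter and records envelope:

```python
import requests

CLUSTER = "cluster.example.com"  # placeholder
AUTH = ("admin", "password")     # placeholder

url = f"https://{CLUSTER}/api/protocols/s3/buckets"
resp = requests.get(url, params={"fields": "size,logical_used_size"},
                    auth=AUTH, verify=False, timeout=30)
resp.raise_for_status()
for bucket in resp.json().get("records", []):
    print(bucket.get("name"), bucket.get("size"), bucket.get("logical_used_size"))
```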

    API Endpoint Metric Template REST api/protocols/s3/buckets size conf/rest/9.7.0/ontap_s3.yaml"},{"location":"ontap-metrics/#ontaps3_svm_abort_multipart_upload_failed","title":"ontaps3_svm_abort_multipart_upload_failed","text":"

    Number of failed Abort Multipart Upload operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_abort_multipart_upload_failed_client_close","title":"ontaps3_svm_abort_multipart_upload_failed_client_close","text":"

    Number of times an Abort Multipart Upload operation failed because the client terminated the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_abort_multipart_upload_latency","title":"ontaps3_svm_abort_multipart_upload_latency","text":"

    Average latency for Abort Multipart Upload operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_latencyUnit: microsecType: averageBase: abort_multipart_upload_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_latencyUnit: microsecType: average,no-zero-valuesBase: abort_multipart_upload_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_abort_multipart_upload_rate","title":"ontaps3_svm_abort_multipart_upload_rate","text":"

    Number of Abort Multipart Upload operations per second.
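
    A rate-type counter such as this one is the change in the underlying cumulative count divided by the elapsed time between two polls, while a delta-type counter (for example abort_multipart_upload_total) keeps only the raw difference. A small sketch with hypothetical samples:

```python
def rate(prev_count, curr_count, interval_seconds):
    """Rate-type counter: operations per second over one polling interval."""
    return max(curr_count - prev_count, 0) / interval_seconds

def delta(prev_count, curr_count):
    """Delta-type counter: raw change over one polling interval."""
    return max(curr_count - prev_count, 0)

# Hypothetical cumulative Abort Multipart Upload counts sampled 60 seconds apart
print(rate(prev_count=1_200, curr_count=1_320, interval_seconds=60))  # 2.0 ops/sec
print(delta(prev_count=1_200, curr_count=1_320))                      # 120 operations
```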

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_abort_multipart_upload_total","title":"ontaps3_svm_abort_multipart_upload_total","text":"

    Number of Abort Multipart Upload operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_allow_access","title":"ontaps3_svm_allow_access","text":"

    Number of times access was allowed.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server allow_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server allow_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_anonymous_access","title":"ontaps3_svm_anonymous_access","text":"

    Number of times anonymous access was allowed.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server anonymous_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server anonymous_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_anonymous_deny_access","title":"ontaps3_svm_anonymous_deny_access","text":"

    Number of times anonymous access was denied.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server anonymous_deny_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server anonymous_deny_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_authentication_failures","title":"ontaps3_svm_authentication_failures","text":"

    Number of authentication failures.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server authentication_failuresUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server authentication_failuresUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_chunked_upload_reqs","title":"ontaps3_svm_chunked_upload_reqs","text":"

    Total number of object store server chunked object upload requests

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server chunked_upload_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server chunked_upload_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_complete_multipart_upload_failed","title":"ontaps3_svm_complete_multipart_upload_failed","text":"

    Number of failed Complete Multipart Upload operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_complete_multipart_upload_failed_client_close","title":"ontaps3_svm_complete_multipart_upload_failed_client_close","text":"

    Number of times a Complete Multipart Upload operation failed because the client terminated the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_complete_multipart_upload_latency","title":"ontaps3_svm_complete_multipart_upload_latency","text":"

    Average latency for Complete Multipart Upload operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_latencyUnit: microsecType: averageBase: complete_multipart_upload_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_latencyUnit: microsecType: average,no-zero-valuesBase: complete_multipart_upload_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_complete_multipart_upload_rate","title":"ontaps3_svm_complete_multipart_upload_rate","text":"

    Number of Complete Multipart Upload operations per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_complete_multipart_upload_total","title":"ontaps3_svm_complete_multipart_upload_total","text":"

    Number of Complete Multipart Upload operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_connected_connections","title":"ontaps3_svm_connected_connections","text":"

    Number of object store server connections currently established

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server connected_connectionsUnit: noneType: rawBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server connected_connectionsUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_connections","title":"ontaps3_svm_connections","text":"

    Total number of object store server connections.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server connectionsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server connectionsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_create_bucket_failed","title":"ontaps3_svm_create_bucket_failed","text":"

    Number of failed Create Bucket operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_create_bucket_failed_client_close","title":"ontaps3_svm_create_bucket_failed_client_close","text":"

    Number of times a Create Bucket operation failed because the client terminated the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_create_bucket_latency","title":"ontaps3_svm_create_bucket_latency","text":"

    Average latency for Create Bucket operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_latencyUnit: microsecType: averageBase: create_bucket_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_latencyUnit: microsecType: average,no-zero-valuesBase: create_bucket_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_create_bucket_rate","title":"ontaps3_svm_create_bucket_rate","text":"

    Number of Create Bucket operations per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_create_bucket_total","title":"ontaps3_svm_create_bucket_total","text":"

    Number of Create Bucket operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_default_deny_access","title":"ontaps3_svm_default_deny_access","text":"

    Number of times access was denied by default and not through any policy statement.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server default_deny_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server default_deny_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_bucket_failed","title":"ontaps3_svm_delete_bucket_failed","text":"

    Number of failed Delete Bucket operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_bucket_failed_client_close","title":"ontaps3_svm_delete_bucket_failed_client_close","text":"

    Number of times a Delete Bucket operation failed because the client terminated the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_bucket_latency","title":"ontaps3_svm_delete_bucket_latency","text":"

    Average latency for Delete Bucket operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_latencyUnit: microsecType: averageBase: delete_bucket_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_latencyUnit: microsecType: average,no-zero-valuesBase: delete_bucket_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_bucket_rate","title":"ontaps3_svm_delete_bucket_rate","text":"

    Number of Delete Bucket operations per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_bucket_total","title":"ontaps3_svm_delete_bucket_total","text":"

    Number of Delete Bucket operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_failed","title":"ontaps3_svm_delete_object_failed","text":"

    Number of failed DELETE object operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_failed_client_close","title":"ontaps3_svm_delete_object_failed_client_close","text":"

    Number of times a DELETE object operation failed because the client closed the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_latency","title":"ontaps3_svm_delete_object_latency","text":"

    Average latency for DELETE object operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_latencyUnit: microsecType: averageBase: delete_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_latencyUnit: microsecType: average,no-zero-valuesBase: delete_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_rate","title":"ontaps3_svm_delete_object_rate","text":"

    Number of DELETE object operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_tagging_failed","title":"ontaps3_svm_delete_object_tagging_failed","text":"

    Number of failed DELETE object tagging operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_tagging_failed_client_close","title":"ontaps3_svm_delete_object_tagging_failed_client_close","text":"

    Number of times a DELETE object tagging operation failed because the client terminated the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_tagging_latency","title":"ontaps3_svm_delete_object_tagging_latency","text":"

    Average latency for DELETE object tagging operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_latencyUnit: microsecType: averageBase: delete_object_tagging_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_latencyUnit: microsecType: average,no-zero-valuesBase: delete_object_tagging_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_tagging_rate","title":"ontaps3_svm_delete_object_tagging_rate","text":"

    Number of DELETE object tagging operations per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_tagging_total","title":"ontaps3_svm_delete_object_tagging_total","text":"

    Number of DELETE object tagging operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_total","title":"ontaps3_svm_delete_object_total","text":"

    Number of DELETE object operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_explicit_deny_access","title":"ontaps3_svm_explicit_deny_access","text":"

    Number of times access was denied explicitly by a policy statement.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server explicit_deny_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server explicit_deny_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_bucket_acl_failed","title":"ontaps3_svm_get_bucket_acl_failed","text":"

    Number of failed GET Bucket ACL operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_bucket_acl_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_bucket_acl_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_bucket_acl_total","title":"ontaps3_svm_get_bucket_acl_total","text":"

    Number of GET Bucket ACL operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_bucket_acl_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_bucket_acl_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_bucket_versioning_failed","title":"ontaps3_svm_get_bucket_versioning_failed","text":"

    Number of failed Get Bucket Versioning operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_bucket_versioning_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_bucket_versioning_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_bucket_versioning_total","title":"ontaps3_svm_get_bucket_versioning_total","text":"

    Number of Get Bucket Versioning operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_bucket_versioning_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_bucket_versioning_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_data","title":"ontaps3_svm_get_data","text":"

    Rate of GET object data transfers per second

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_dataUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_acl_failed","title":"ontaps3_svm_get_object_acl_failed","text":"

    Number of failed GET Object ACL operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_acl_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_acl_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_acl_total","title":"ontaps3_svm_get_object_acl_total","text":"

    Number of GET Object ACL operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_acl_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_acl_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_failed","title":"ontaps3_svm_get_object_failed","text":"

    Number of failed GET object operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_failed_client_close","title":"ontaps3_svm_get_object_failed_client_close","text":"

    Number of times a GET object operation failed because the client closed the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_lastbyte_latency","title":"ontaps3_svm_get_object_lastbyte_latency","text":"

    Average last-byte latency for GET object operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_lastbyte_latencyUnit: microsecType: averageBase: get_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_lastbyte_latencyUnit: microsecType: average,no-zero-valuesBase: get_object_lastbyte_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_latency","title":"ontaps3_svm_get_object_latency","text":"

    Average first-byte latency for GET object operations
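
    Comparing this first-byte average with the last-byte average above gives a rough per-object transfer time, since both are measured against the same GET object base counter. A hypothetical illustration, not a documented formula:

```python
# Hypothetical averages from the two GET object latency counters, in microseconds
first_byte_us = 1_800.0  # ontaps3_svm_get_object_latency
last_byte_us = 9_300.0   # ontaps3_svm_get_object_lastbyte_latency

# The gap between the two roughly corresponds to time spent streaming the object body
transfer_us = last_byte_us - first_byte_us
print(f"approximate transfer time per GET: {transfer_us / 1_000:.1f} ms")  # 7.5 ms
```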

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_latencyUnit: microsecType: averageBase: get_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_latencyUnit: microsecType: average,no-zero-valuesBase: get_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_rate","title":"ontaps3_svm_get_object_rate","text":"

    Number of GET object operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_tagging_failed","title":"ontaps3_svm_get_object_tagging_failed","text":"

    Number of failed GET object tagging operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_tagging_failed_client_close","title":"ontaps3_svm_get_object_tagging_failed_client_close","text":"

    Number of times a GET object tagging operation failed because the client closed the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_tagging_latency","title":"ontaps3_svm_get_object_tagging_latency","text":"

    Average latency for GET object tagging operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_latencyUnit: microsecType: averageBase: get_object_tagging_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_latencyUnit: microsecType: average,no-zero-valuesBase: get_object_tagging_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_tagging_rate","title":"ontaps3_svm_get_object_tagging_rate","text":"

    Number of GET object tagging operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_tagging_total","title":"ontaps3_svm_get_object_tagging_total","text":"

    Number of GET object tagging operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_total","title":"ontaps3_svm_get_object_total","text":"

    Number of GET object operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_group_policy_evaluated","title":"ontaps3_svm_group_policy_evaluated","text":"

    Number of times group policies were evaluated.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server group_policy_evaluatedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server group_policy_evaluatedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_bucket_failed","title":"ontaps3_svm_head_bucket_failed","text":"

    Number of failed HEAD bucket operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_bucket_failed_client_close","title":"ontaps3_svm_head_bucket_failed_client_close","text":"

    Number of times a HEAD bucket operation failed because the client closed the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_bucket_latency","title":"ontaps3_svm_head_bucket_latency","text":"

    Average latency for HEAD bucket operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_latencyUnit: microsecType: averageBase: head_bucket_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_latencyUnit: microsecType: average,no-zero-valuesBase: head_bucket_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_bucket_rate","title":"ontaps3_svm_head_bucket_rate","text":"

    Number of HEAD bucket operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_bucket_total","title":"ontaps3_svm_head_bucket_total","text":"

    Number of HEAD bucket operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_object_failed","title":"ontaps3_svm_head_object_failed","text":"

    Number of failed HEAD Object operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_object_failed_client_close","title":"ontaps3_svm_head_object_failed_client_close","text":"

    Number of times a HEAD object operation failed because the client closed the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_object_latency","title":"ontaps3_svm_head_object_latency","text":"

    Average latency for HEAD object operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_latencyUnit: microsecType: averageBase: head_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_latencyUnit: microsecType: average,no-zero-valuesBase: head_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_object_rate","title":"ontaps3_svm_head_object_rate","text":"

    Number of HEAD Object operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_object_total","title":"ontaps3_svm_head_object_total","text":"

    Number of HEAD Object operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_initiate_multipart_upload_failed","title":"ontaps3_svm_initiate_multipart_upload_failed","text":"

    Number of failed Initiate Multipart Upload operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_initiate_multipart_upload_failed_client_close","title":"ontaps3_svm_initiate_multipart_upload_failed_client_close","text":"

Number of times an Initiate Multipart Upload operation failed because the client terminated the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_initiate_multipart_upload_latency","title":"ontaps3_svm_initiate_multipart_upload_latency","text":"

    Average latency for Initiate Multipart Upload operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_latencyUnit: microsecType: averageBase: initiate_multipart_upload_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_latencyUnit: microsecType: average,no-zero-valuesBase: initiate_multipart_upload_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_initiate_multipart_upload_rate","title":"ontaps3_svm_initiate_multipart_upload_rate","text":"

    Number of Initiate Multipart Upload operations per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_initiate_multipart_upload_total","title":"ontaps3_svm_initiate_multipart_upload_total","text":"

    Number of Initiate Multipart Upload operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_input_flow_control_entry","title":"ontaps3_svm_input_flow_control_entry","text":"

    Number of times input flow control was entered.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server input_flow_control_entryUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server input_flow_control_entryUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_input_flow_control_exit","title":"ontaps3_svm_input_flow_control_exit","text":"

    Number of times input flow control was exited.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server input_flow_control_exitUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server input_flow_control_exitUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_buckets_failed","title":"ontaps3_svm_list_buckets_failed","text":"

    Number of failed LIST Buckets operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_buckets_failed_client_close","title":"ontaps3_svm_list_buckets_failed_client_close","text":"

Number of times a LIST Buckets operation failed because the client closed the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_buckets_latency","title":"ontaps3_svm_list_buckets_latency","text":"

    Average latency for LIST Buckets operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_latencyUnit: microsecType: averageBase: list_buckets_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_latencyUnit: microsecType: average,no-zero-valuesBase: head_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_buckets_rate","title":"ontaps3_svm_list_buckets_rate","text":"

    Number of LIST Buckets operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_buckets_total","title":"ontaps3_svm_list_buckets_total","text":"

    Number of LIST Buckets operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_object_versions_failed","title":"ontaps3_svm_list_object_versions_failed","text":"

    Number of failed LIST object versions operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_object_versions_failed_client_close","title":"ontaps3_svm_list_object_versions_failed_client_close","text":"

Number of times a LIST object versions operation failed because the client closed the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_object_versions_latency","title":"ontaps3_svm_list_object_versions_latency","text":"

    Average latency for LIST Object versions operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_latencyUnit: microsecType: averageBase: list_object_versions_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_latencyUnit: microsecType: average,no-zero-valuesBase: list_object_versions_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_object_versions_rate","title":"ontaps3_svm_list_object_versions_rate","text":"

    Number of LIST Object Versions operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_object_versions_total","title":"ontaps3_svm_list_object_versions_total","text":"

    Number of LIST Object Versions operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_objects_failed","title":"ontaps3_svm_list_objects_failed","text":"

    Number of failed LIST objects operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_objects_failed_client_close","title":"ontaps3_svm_list_objects_failed_client_close","text":"

Number of times a LIST objects operation failed because the client closed the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_objects_latency","title":"ontaps3_svm_list_objects_latency","text":"

    Average latency for LIST Objects operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_latencyUnit: microsecType: averageBase: list_objects_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_latencyUnit: microsecType: average,no-zero-valuesBase: list_objects_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_objects_rate","title":"ontaps3_svm_list_objects_rate","text":"

    Number of LIST Objects operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_objects_total","title":"ontaps3_svm_list_objects_total","text":"

    Number of LIST Objects operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_uploads_failed","title":"ontaps3_svm_list_uploads_failed","text":"

    Number of failed LIST Upload operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_uploads_failed_client_close","title":"ontaps3_svm_list_uploads_failed_client_close","text":"

Number of times a LIST Upload operation failed because the client closed the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_uploads_latency","title":"ontaps3_svm_list_uploads_latency","text":"

    Average latency for LIST Upload operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_latencyUnit: microsecType: averageBase: list_uploads_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_latencyUnit: microsecType: average,no-zero-valuesBase: list_uploads_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_uploads_rate","title":"ontaps3_svm_list_uploads_rate","text":"

    Number of LIST Upload operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_uploads_total","title":"ontaps3_svm_list_uploads_total","text":"

    Number of LIST Upload operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_max_cmds_per_connection","title":"ontaps3_svm_max_cmds_per_connection","text":"

    Maximum commands pipelined at any instance on a connection.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server maximum_commands_per_connectionUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server max_cmds_per_connectionUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_max_connected_connections","title":"ontaps3_svm_max_connected_connections","text":"

    Maximum number of object store server connections established at one time

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server maximum_connected_connectionsUnit: noneType: rawBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server max_connected_connectionsUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_max_requests_outstanding","title":"ontaps3_svm_max_requests_outstanding","text":"

    Maximum number of object store server requests in process at one time

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server maximum_requests_outstandingUnit: noneType: rawBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server max_requests_outstandingUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_multi_delete_reqs","title":"ontaps3_svm_multi_delete_reqs","text":"

    Total number of object store server multiple object delete requests

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server multiple_delete_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server multi_delete_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_output_flow_control_entry","title":"ontaps3_svm_output_flow_control_entry","text":"

Number of times output flow control was entered.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server output_flow_control_entryUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server output_flow_control_entryUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_output_flow_control_exit","title":"ontaps3_svm_output_flow_control_exit","text":"

    Number of times output flow control was exited.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server output_flow_control_exitUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server output_flow_control_exitUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_presigned_url_reqs","title":"ontaps3_svm_presigned_url_reqs","text":"

    Total number of presigned object store server URL requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server presigned_url_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server presigned_url_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_bucket_versioning_failed","title":"ontaps3_svm_put_bucket_versioning_failed","text":"

    Number of failed Put Bucket Versioning operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_bucket_versioning_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_bucket_versioning_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_bucket_versioning_total","title":"ontaps3_svm_put_bucket_versioning_total","text":"

    Number of Put Bucket Versioning operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_bucket_versioning_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_bucket_versioning_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_data","title":"ontaps3_svm_put_data","text":"

    Rate of PUT object data transfers per second

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_dataUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_failed","title":"ontaps3_svm_put_object_failed","text":"

    Number of failed PUT object operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_failed_client_close","title":"ontaps3_svm_put_object_failed_client_close","text":"

Number of times a PUT object operation failed because the client closed the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_latency","title":"ontaps3_svm_put_object_latency","text":"

    Average latency for PUT object operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_latencyUnit: microsecType: averageBase: put_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_latencyUnit: microsecType: average,no-zero-valuesBase: put_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_rate","title":"ontaps3_svm_put_object_rate","text":"

    Number of PUT object operations per second

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_tagging_failed","title":"ontaps3_svm_put_object_tagging_failed","text":"

    Number of failed PUT object tagging operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_tagging_failed_client_close","title":"ontaps3_svm_put_object_tagging_failed_client_close","text":"

Number of times a PUT object tagging operation failed because the client terminated the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_tagging_latency","title":"ontaps3_svm_put_object_tagging_latency","text":"

    Average latency for PUT object tagging operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_latencyUnit: microsecType: averageBase: put_object_tagging_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_latencyUnit: microsecType: average,no-zero-valuesBase: put_object_tagging_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_tagging_rate","title":"ontaps3_svm_put_object_tagging_rate","text":"

    Number of PUT object tagging operations per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_tagging_total","title":"ontaps3_svm_put_object_tagging_total","text":"

    Number of PUT object tagging operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_total","title":"ontaps3_svm_put_object_total","text":"

    Number of PUT object operations

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_request_parse_errors","title":"ontaps3_svm_request_parse_errors","text":"

    Number of request parser errors due to malformed requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server request_parse_errorsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server request_parse_errorsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_requests","title":"ontaps3_svm_requests","text":"

    Total number of object store server requests

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server requestsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_requests_outstanding","title":"ontaps3_svm_requests_outstanding","text":"

    Number of object store server requests in process

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server requests_outstandingUnit: noneType: rawBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server requests_outstandingUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_root_user_access","title":"ontaps3_svm_root_user_access","text":"

Number of times access was made by the root user.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server root_user_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server root_user_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_server_connection_close","title":"ontaps3_svm_server_connection_close","text":"

    Number of connection closes triggered by server due to fatal errors.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server server_connection_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server server_connection_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_signature_v2_reqs","title":"ontaps3_svm_signature_v2_reqs","text":"

    Total number of object store server signature V2 requests

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server signature_v2_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server signature_v2_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_signature_v4_reqs","title":"ontaps3_svm_signature_v4_reqs","text":"

    Total number of object store server signature V4 requests

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server signature_v4_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server signature_v4_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_tagging","title":"ontaps3_svm_tagging","text":"

    Number of requests with tagging specified.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server taggingUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server taggingUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_upload_part_failed","title":"ontaps3_svm_upload_part_failed","text":"

    Number of failed Upload Part operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_upload_part_failed_client_close","title":"ontaps3_svm_upload_part_failed_client_close","text":"

Number of times an Upload Part operation failed because the client terminated the connection while the operation was still pending on the server.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_upload_part_latency","title":"ontaps3_svm_upload_part_latency","text":"

    Average latency for Upload Part operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_latencyUnit: microsecType: averageBase: upload_part_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_latencyUnit: microsecType: average,no-zero-valuesBase: upload_part_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_upload_part_rate","title":"ontaps3_svm_upload_part_rate","text":"

    Number of Upload Part operations per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_upload_part_total","title":"ontaps3_svm_upload_part_total","text":"

    Number of Upload Part operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_used_percent","title":"ontaps3_used_percent","text":"

The used_percent metric is the percentage of a bucket's total capacity that is currently being used.
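A minimal sketch of how this derived value can be computed from the two fields the REST endpoint below returns; only logical_used_size and size come from the template, and the sample record is fabricated for illustration:

```python
# Hedged sketch: computing used_percent from the two bucket fields
# (logical_used_size, size) returned by api/protocols/s3/buckets.
# The sample record below is fabricated (3 GiB used of a 10 GiB bucket).

sample_bucket = {
    "name": "bucket1",
    "logical_used_size": 3_221_225_472,
    "size": 10_737_418_240,
}

def used_percent(bucket):
    size = bucket.get("size") or 0
    if size == 0:
        return 0.0                     # avoid division by zero for empty/unsized buckets
    return 100.0 * bucket["logical_used_size"] / size

print(round(used_percent(sample_bucket), 1))  # -> 30.0
```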

    API Endpoint Metric Template REST api/protocols/s3/buckets logical_used_size, size conf/rest/9.7.0/ontap_s3.yaml"},{"location":"ontap-metrics/#path_read_data","title":"path_read_data","text":"

    The average read throughput in kilobytes per second read from the indicated target port by the controller.

    API Endpoint Metric Template REST api/cluster/counter/tables/path read_dataUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path read_dataUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#path_read_iops","title":"path_read_iops","text":"

    The number of I/O read operations sent from the initiator port to the indicated target port.

    API Endpoint Metric Template REST api/cluster/counter/tables/path read_iopsUnit: per_secType: rateBase: conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path read_iopsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#path_read_latency","title":"path_read_latency","text":"

    The average latency of I/O read operations sent from this controller to the indicated target port.

    API Endpoint Metric Template REST api/cluster/counter/tables/path read_latencyUnit: microsecType: averageBase: read_iops conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path read_latencyUnit: microsecType: averageBase: read_iops conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#path_total_data","title":"path_total_data","text":"

    The average throughput in kilobytes per second read and written from/to the indicated target port by the controller.

    API Endpoint Metric Template REST api/cluster/counter/tables/path total_dataUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path total_dataUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#path_total_iops","title":"path_total_iops","text":"

    The number of total read/write I/O operations sent from the initiator port to the indicated target port.

    API Endpoint Metric Template REST api/cluster/counter/tables/path total_iopsUnit: per_secType: rateBase: conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path total_iopsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#path_write_data","title":"path_write_data","text":"

    The average write throughput in kilobytes per second written to the indicated target port by the controller.

    API Endpoint Metric Template REST api/cluster/counter/tables/path write_dataUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path write_dataUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#path_write_iops","title":"path_write_iops","text":"

    The number of I/O write operations sent from the initiator port to the indicated target port.

    API Endpoint Metric Template REST api/cluster/counter/tables/path write_iopsUnit: per_secType: rateBase: conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path write_iopsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#path_write_latency","title":"path_write_latency","text":"

    The average latency of I/O write operations sent from this controller to the indicated target port.

    API Endpoint Metric Template REST api/cluster/counter/tables/path write_latencyUnit: microsecType: averageBase: write_iops conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path write_latencyUnit: microsecType: averageBase: write_iops conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#plex_disk_busy","title":"plex_disk_busy","text":"

    The utilization percent of the disk. plex_disk_busy is disk_busy aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent disk_busy_percentUnit: percentType: percentBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_busyUnit: percentType: percentBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_capacity","title":"plex_disk_capacity","text":"

    Disk capacity in MB. plex_disk_capacity is disk_capacity aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent capacityUnit: mbType: rawBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_capacityUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_cp_read_chain","title":"plex_disk_cp_read_chain","text":"

    Average number of blocks transferred in each consistency point read operation during a CP. plex_disk_cp_read_chain is disk_cp_read_chain aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_chainUnit: noneType: averageBase: cp_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_chainUnit: noneType: averageBase: cp_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_cp_read_latency","title":"plex_disk_cp_read_latency","text":"

    Average latency per block in microseconds for consistency point read operations. plex_disk_cp_read_latency is disk_cp_read_latency aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_cp_reads","title":"plex_disk_cp_reads","text":"

    Number of disk read operations initiated each second for consistency point processing. plex_disk_cp_reads is disk_cp_reads aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_io_pending","title":"plex_disk_io_pending","text":"

    Average number of I/Os issued to the disk for which we have not yet received the response. plex_disk_io_pending is disk_io_pending aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_io_queued","title":"plex_disk_io_queued","text":"

    Number of I/Os queued to the disk but not yet issued. plex_disk_io_queued is disk_io_queued aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_total_data","title":"plex_disk_total_data","text":"

    Total throughput for user operations per second. plex_disk_total_data is disk_total_data aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_total_transfers","title":"plex_disk_total_transfers","text":"

    Total number of disk operations involving data transfer initiated per second. plex_disk_total_transfers is disk_total_transfers aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_transfer_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_transfersUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_read_blocks","title":"plex_disk_user_read_blocks","text":"

    Number of blocks transferred for user read operations per second. plex_disk_user_read_blocks is disk_user_read_blocks aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_read_chain","title":"plex_disk_user_read_chain","text":"

    Average number of blocks transferred in each user read operation. plex_disk_user_read_chain is disk_user_read_chain aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_chainUnit: noneType: averageBase: user_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_chainUnit: noneType: averageBase: user_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_read_latency","title":"plex_disk_user_read_latency","text":"

    Average latency per block in microseconds for user read operations. plex_disk_user_read_latency is disk_user_read_latency aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_reads","title":"plex_disk_user_reads","text":"

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. plex_disk_user_reads is disk_user_reads aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_write_blocks","title":"plex_disk_user_write_blocks","text":"

    Number of blocks transferred for user write operations per second. plex_disk_user_write_blocks is disk_user_write_blocks aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_write_chain","title":"plex_disk_user_write_chain","text":"

    Average number of blocks transferred in each user write operation. plex_disk_user_write_chain is disk_user_write_chain aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_chainUnit: noneType: averageBase: user_write_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_chainUnit: noneType: averageBase: user_writes conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_write_latency","title":"plex_disk_user_write_latency","text":"

    Average latency per block in microseconds for user write operations. plex_disk_user_write_latency is disk_user_write_latency aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_writes","title":"plex_disk_user_writes","text":"

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. plex_disk_user_writes is disk_user_writes aggregated by plex.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_writesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#qos_concurrency","title":"qos_concurrency","text":"

    This is the average number of concurrent requests for the workload.

    API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume concurrencyUnit: noneType: rateBase: conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume concurrencyUnit: noneType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_detail_resource_latency","title":"qos_detail_resource_latency","text":"

This refers to the average latency for workloads within the subsystems of Data ONTAP. These subsystems are the various modules or components within the system that could contribute to delays or latency during data or task processing. The calculated latency includes both the processing time within the subsystem and the waiting time at that subsystem. Below is a description of each subsystem's latency; a small illustrative sketch follows the list.

    • frontend: Represents the delays in the network layer of ONTAP.
    • backend: Represents the delays in the data/WAFL layer of ONTAP.
• cluster: Represents delays caused by the cluster switches, cables, and adapters which physically connect clustered nodes. If the cluster interconnect component is in contention, it means high wait time for I/O requests at the cluster interconnect is impacting the latency of one or more workloads.
    • cp: Represents delays due to buffered write flushes, called consistency points (cp).
    • disk: Represents slowness due to attached hard drives or solid state drives.
• network: Note: Typically these latencies only apply to SAN, not NAS. Represents the wait time of I/O requests by the external networking protocols on the cluster. The wait time is time spent waiting for transfer ready transactions to finish before the cluster can respond to an I/O request. If the network component is in contention, it means high wait time at the protocol layer is impacting the latency of one or more workloads.
    • nvlog: Represents delays due to mirroring writes to the NVRAM/NVLOG memory and to the HA partner NVRAM/NVLOG memory.
    • suspend: Represents delays due to operations suspending on a delay mechanism. Typically this is diagnosed by NetApp Support.
    • throttle: Represents the throughput maximum (ceiling) setting of the storage Quality of Service (QoS) policy group assigned to the workload. If the policy group component is in contention, it means all workloads in the policy group are being throttled by the set throughput limit, which is impacting the latency of one or more of those workloads.
• qos_min: Represents the latency to a workload that is being caused by the QoS throughput floor (expected) setting assigned to other workloads. If the QoS floor set on certain workloads uses the majority of the bandwidth to guarantee the promised throughput, other workloads will be throttled and see more latency.
    • cloud: Represents the software component in the cluster involved with I/O processing between the cluster and the cloud tier on which user data is stored. If the cloud latency component is in contention, it means that a large amount of reads from volumes that are hosted on the cloud tier are impacting the latency of one or more workloads.
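As a rough illustration of how these per-subsystem latencies are usually read together, the sketch below sums the components reported for a single workload and flags the dominant contributor; the subsystem values and dictionary layout are invented for the example and do not come from any template:

```python
# Illustrative only: given per-subsystem resource latencies (microsec/op)
# for one workload, report the total and the dominant component.
# Values are invented; real data comes from the qos_detail counters above.

subsystem_latency = {
    "frontend": 120.0,
    "backend": 340.0,
    "cluster": 15.0,
    "disk": 900.0,
    "cp": 60.0,
}

total = sum(subsystem_latency.values())
dominant, value = max(subsystem_latency.items(), key=lambda kv: kv[1])
print(f"total={total:.0f} microsec, dominant={dominant} ({100 * value / total:.0f}%)")
# -> total=1435 microsec, dominant=disk (63%)
```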
    API Endpoint Metric Template REST api/cluster/counter/tables/qos_detail Harvest generatedUnit: microsecondsType: averageBase: ops conf/restperf/9.12.0/workload_detail.yaml ZAPI perf-object-get-instances workload_detail Harvest generatedUnit: microsecondsType: averageBase: ops conf/zapiperf/9.12.0/workload_detail.yaml"},{"location":"ontap-metrics/#qos_detail_service_time_latency","title":"qos_detail_service_time_latency","text":"

This refers to the average service time for a workload within the subsystems of Data ONTAP. These subsystems are the various modules or components within the system that could contribute to delays or latency during data or task processing. This latency is the processing time within the subsystem.

    API Endpoint Metric Template REST api/cluster/counter/tables/qos_detail Harvest generatedUnit: microsecondsType: averageBase: ops conf/restperf/9.12.0/workload_detail.yaml ZAPI perf-object-get-instances workload_detail Harvest generatedUnit: microsecondsType: averageBase: ops conf/zapiperf/9.12.0/workload_detail.yaml"},{"location":"ontap-metrics/#qos_latency","title":"qos_latency","text":"

    This is the average response time for requests that were initiated by the workload.

    API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume latencyUnit: microsecType: averageBase: ops conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume latencyUnit: microsecType: average,no-zero-valuesBase: ops conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_ops","title":"qos_ops","text":"

    This field is the workload's rate of operations that completed during the measurement interval; measured per second.
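Counters with Type: rate, such as this one, are typically derived from a cumulative raw counter by dividing the change between two polls by the poll interval. A small illustrative sketch (the poll interval and counter values are invented):

```python
# Minimal sketch: deriving a per-second rate from a cumulative ops counter
# sampled at two poll times. Numbers are illustrative only.

def ops_rate(counter_t1, counter_t2, seconds_between_polls):
    """Return operations per second between two polls of a cumulative counter."""
    if seconds_between_polls <= 0:
        raise ValueError("poll interval must be positive")
    return (counter_t2 - counter_t1) / seconds_between_polls

# Example: the counter advanced by 90_000 ops over a 60-second interval.
print(ops_rate(1_000_000, 1_090_000, 60))  # -> 1500.0 ops/sec
```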

    API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume opsUnit: per_secType: rateBase: conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_other_ops","title":"qos_other_ops","text":"

    This is the rate of this workload's other operations that completed during the measurement interval.

    API Endpoint Metric Template REST api/cluster/counter/tables/qos other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/workload.yaml ZAPI perf-object-get-instances workload_volume other_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_read_data","title":"qos_read_data","text":"

    This is the amount of data read per second from the filer by the workload.

    API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume read_dataUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_read_io_type","title":"qos_read_io_type","text":"

    This is the percentage of read requests served from various components (such as buffer cache, ext_cache, disk, etc.).

    API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume read_io_type_percentUnit: percentType: percentBase: read_io_type_base conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume read_io_typeUnit: percentType: percentBase: read_io_type_base conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_read_latency","title":"qos_read_latency","text":"

    This is the average response time for read requests that were initiated by the workload.

    API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume read_latencyUnit: microsecType: average,no-zero-valuesBase: read_ops conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_read_ops","title":"qos_read_ops","text":"

    This is the rate of this workload's read operations that completed during the measurement interval.

    API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume read_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_sequential_reads","title":"qos_sequential_reads","text":"

    This is the percentage of reads, performed on behalf of the workload, that were sequential.

    API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume sequential_reads_percentUnit: percentType: percentBase: sequential_reads_base conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume sequential_readsUnit: percentType: percent,no-zero-valuesBase: sequential_reads_base conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_sequential_writes","title":"qos_sequential_writes","text":"

    This is the percentage of writes, performed on behalf of the workload, that were sequential. This counter is only available on platforms with more than 4GB of NVRAM.

    API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume sequential_writes_percentUnit: percentType: percentBase: sequential_writes_base conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume sequential_writesUnit: percentType: percent,no-zero-valuesBase: sequential_writes_base conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_total_data","title":"qos_total_data","text":"

    This is the total amount of data read/written per second from/to the filer by the workload.

    API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume total_dataUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_write_data","title":"qos_write_data","text":"

    This is the amount of data written per second to the filer by the workload.

    API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume write_dataUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_write_latency","title":"qos_write_latency","text":"

    This is the average response time for write requests that were initiated by the workload.

    API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume write_latencyUnit: microsecType: average,no-zero-valuesBase: write_ops conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_write_ops","title":"qos_write_ops","text":"

    This is the workload's write operations that completed during the measurement interval; measured per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume write_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qtree_cifs_ops","title":"qtree_cifs_ops","text":"

    Number of CIFS operations per second to the qtree

    API Endpoint Metric Template REST api/cluster/counter/tables/qtree cifs_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/qtree.yaml ZAPI perf-object-get-instances qtree cifs_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#qtree_id","title":"qtree_id","text":"

    The identifier for the qtree, unique within the qtree's volume.

    API Endpoint Metric Template REST api/storage/qtrees id conf/rest/9.12.0/qtree.yaml"},{"location":"ontap-metrics/#qtree_internal_ops","title":"qtree_internal_ops","text":"

Number of internal operations generated by activities such as snapmirror and backup per second to the qtree

    API Endpoint Metric Template REST api/cluster/counter/tables/qtree internal_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/qtree.yaml ZAPI perf-object-get-instances qtree internal_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#qtree_nfs_ops","title":"qtree_nfs_ops","text":"

    Number of NFS operations per second to the qtree

    API Endpoint Metric Template REST api/cluster/counter/tables/qtree nfs_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/qtree.yaml ZAPI perf-object-get-instances qtree nfs_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#qtree_total_ops","title":"qtree_total_ops","text":"

    Summation of NFS ops, CIFS ops, CSS ops and internal ops

    API Endpoint Metric Template REST api/cluster/counter/tables/qtree total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/qtree.yaml ZAPI perf-object-get-instances qtree total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_disk_limit","title":"quota_disk_limit","text":"

    Maximum amount of disk space, in kilobytes, allowed for the quota target (hard disk space limit). The value is -1 if the limit is unlimited.

    API Endpoint Metric Template REST api/storage/quota/reports space.hard_limit conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter disk-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_disk_used","title":"quota_disk_used","text":"

    Current amount of disk space, in kilobytes, used by the quota target.

    API Endpoint Metric Template REST api/storage/quota/reports space.used.total conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter disk-used conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_disk_used_pct_disk_limit","title":"quota_disk_used_pct_disk_limit","text":"

    Current disk space used expressed as a percentage of hard disk limit.

    API Endpoint Metric Template REST api/storage/quota/reports space.used.hard_limit_percent conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter disk-used-pct-disk-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_disk_used_pct_soft_disk_limit","title":"quota_disk_used_pct_soft_disk_limit","text":"
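
    Because quota_disk_limit reports -1 for unlimited quotas, any percentage computed by hand from quota_disk_used and quota_disk_limit has to skip that sentinel. A minimal sketch with invented sample rows:

    ```python
    # Sketch: percent-of-hard-limit from quota_disk_used and quota_disk_limit
    # (both in kilobytes), skipping unlimited quotas reported as -1.
    # The sample rows are invented.
    rows = [
        {"qtree": "q1", "disk_used": 800_000, "disk_limit": 1_000_000},
        {"qtree": "q2", "disk_used": 50_000,  "disk_limit": -1},          # unlimited
    ]

    for row in rows:
        if row["disk_limit"] == -1:
            print(f'{row["qtree"]}: unlimited, skipping')
            continue
        pct = 100.0 * row["disk_used"] / row["disk_limit"]
        print(f'{row["qtree"]}: {pct:.1f}% of hard limit')
    ```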

    Current disk space used expressed as a percentage of soft disk limit.

    API Endpoint Metric Template REST api/storage/quota/reports space.used.soft_limit_percent conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter disk-used-pct-soft-disk-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_disk_used_pct_threshold","title":"quota_disk_used_pct_threshold","text":"

    Current disk space used expressed as a percentage of threshold.

    API Endpoint Metric Template ZAPI quota-report-iter disk-used-pct-threshold conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_file_limit","title":"quota_file_limit","text":"

    Maximum number of files allowed for the quota target (hard files limit). The value is -1 if the limit is unlimited.

    API Endpoint Metric Template REST api/storage/quota/reports files.hard_limit conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter file-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_files_used","title":"quota_files_used","text":"

    Current number of files used by the quota target.

    API Endpoint Metric Template REST api/storage/quota/reports files.used.total conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter files-used conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_files_used_pct_file_limit","title":"quota_files_used_pct_file_limit","text":"

    Current number of files used expressed as a percentage of hard file limit.

    API Endpoint Metric Template REST api/storage/quota/reports files.used.hard_limit_percent conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter files-used-pct-file-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_files_used_pct_soft_file_limit","title":"quota_files_used_pct_soft_file_limit","text":"

    Current number of files used expressed as a percentage of soft file limit.

    API Endpoint Metric Template REST api/storage/quota/reports files.used.soft_limit_percent conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter files-used-pct-soft-file-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_soft_disk_limit","title":"quota_soft_disk_limit","text":"

    Soft disk space limit, in kilobytes, for the quota target. The value is -1 if the limit is unlimited.

    API Endpoint Metric Template REST api/storage/quota/reports space.soft_limit conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter soft-disk-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_soft_file_limit","title":"quota_soft_file_limit","text":"

    Soft file limit, in number of files, for the quota target. The value is -1 if the limit is unlimited.

    API Endpoint Metric Template REST api/storage/quota/reports files.soft_limit conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter soft-file-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_threshold","title":"quota_threshold","text":"

    Disk space threshold, in kilobytes, for the quota target. The value is -1 if the limit is unlimited.

    API Endpoint Metric Template ZAPI quota-report-iter threshold conf/zapi/cdot/9.8.0/qtree.yaml REST NA Harvest generated conf/rest/9.12.0/qtree.yaml"},{"location":"ontap-metrics/#raid_disk_busy","title":"raid_disk_busy","text":"

    The utilization percent of the disk. raid_disk_busy is disk_busy aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent disk_busy_percentUnit: percentType: percentBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_busyUnit: percentType: percentBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_capacity","title":"raid_disk_capacity","text":"
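
    The raid_disk_* metrics are the per-disk disk_* counters rolled up by RAID group, so a rough mental model is a group-by over a RAID-group label. The sketch below groups invented per-disk busy percentages by a hypothetical raid_group label; the label name, the sample values, and the choice between average and maximum are assumptions for illustration, not Harvest's exact behavior.

    ```python
    # Rough mental model of "disk_busy aggregated by raid": group per-disk busy
    # percentages by a RAID-group label. The "raid_group" label name and the
    # values are invented; whether the rollup reports the average or the maximum
    # is not stated in the table above, so both are printed.
    from collections import defaultdict

    per_disk_busy = [
        {"disk": "1.0.1", "raid_group": "rg0", "busy_pct": 35.0},
        {"disk": "1.0.2", "raid_group": "rg0", "busy_pct": 55.0},
        {"disk": "1.0.3", "raid_group": "rg1", "busy_pct": 20.0},
    ]

    groups = defaultdict(list)
    for d in per_disk_busy:
        groups[d["raid_group"]].append(d["busy_pct"])

    for rg, values in groups.items():
        print(f"{rg}: avg={sum(values) / len(values):.1f}% max={max(values):.1f}%")
    ```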

    Disk capacity in MB. raid_disk_capacity is disk_capacity aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent capacityUnit: mbType: rawBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_capacityUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_cp_read_chain","title":"raid_disk_cp_read_chain","text":"

    Average number of blocks transferred in each consistency point read operation during a CP. raid_disk_cp_read_chain is disk_cp_read_chain aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_chainUnit: noneType: averageBase: cp_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_chainUnit: noneType: averageBase: cp_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_cp_read_latency","title":"raid_disk_cp_read_latency","text":"

    Average latency per block in microseconds for consistency point read operations. raid_disk_cp_read_latency is disk_cp_read_latency aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_cp_reads","title":"raid_disk_cp_reads","text":"

    Number of disk read operations initiated each second for consistency point processing. raid_disk_cp_reads is disk_cp_reads aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_io_pending","title":"raid_disk_io_pending","text":"

    Average number of I/Os issued to the disk for which we have not yet received the response. raid_disk_io_pending is disk_io_pending aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_io_queued","title":"raid_disk_io_queued","text":"

    Number of I/Os queued to the disk but not yet issued. raid_disk_io_queued is disk_io_queued aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_total_data","title":"raid_disk_total_data","text":"

    Total throughput for user operations per second. raid_disk_total_data is disk_total_data aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_total_transfers","title":"raid_disk_total_transfers","text":"

    Total number of disk operations involving data transfer initiated per second. raid_disk_total_transfers is disk_total_transfers aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_transfer_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_transfersUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_read_blocks","title":"raid_disk_user_read_blocks","text":"

    Number of blocks transferred for user read operations per second. raid_disk_user_read_blocks is disk_user_read_blocks aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_read_chain","title":"raid_disk_user_read_chain","text":"

    Average number of blocks transferred in each user read operation. raid_disk_user_read_chain is disk_user_read_chain aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_chainUnit: noneType: averageBase: user_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_chainUnit: noneType: averageBase: user_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_read_latency","title":"raid_disk_user_read_latency","text":"

    Average latency per block in microseconds for user read operations. raid_disk_user_read_latency is disk_user_read_latency aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_reads","title":"raid_disk_user_reads","text":"

    Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. raid_disk_user_reads is disk_user_reads aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_write_blocks","title":"raid_disk_user_write_blocks","text":"

    Number of blocks transferred for user write operations per second. raid_disk_user_write_blocks is disk_user_write_blocks aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_write_chain","title":"raid_disk_user_write_chain","text":"

    Average number of blocks transferred in each user write operation. raid_disk_user_write_chain is disk_user_write_chain aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_chainUnit: noneType: averageBase: user_write_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_chainUnit: noneType: averageBase: user_writes conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_write_latency","title":"raid_disk_user_write_latency","text":"

    Average latency per block in microseconds for user write operations. raid_disk_user_write_latency is disk_user_write_latency aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_writes","title":"raid_disk_user_writes","text":"

    Number of disk write operations initiated each second for storing data or metadata associated with user requests. raid_disk_user_writes is disk_user_writes aggregated by raid.

    API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_writesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#rw_ctx_cifs_giveups","title":"rw_ctx_cifs_giveups","text":"

    Array of counts of CIFS ops that were given up because they rewound more than a certain threshold, categorized by rewind reason.

    API Endpoint Metric Template ZAPI perf-object-get-instances rw_ctx cifs_giveupsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/rwctx.yaml"},{"location":"ontap-metrics/#rw_ctx_cifs_rewinds","title":"rw_ctx_cifs_rewinds","text":"

    Array of counts of rewinds for CIFS ops, categorized by rewind reason.

    API Endpoint Metric Template ZAPI perf-object-get-instances rw_ctx cifs_rewindsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/rwctx.yaml"},{"location":"ontap-metrics/#rw_ctx_nfs_giveups","title":"rw_ctx_nfs_giveups","text":"

    Array of counts of NFS ops that were given up because they rewound more than a certain threshold, categorized by rewind reason.

    API Endpoint Metric Template ZAPI perf-object-get-instances rw_ctx nfs_giveupsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/rwctx.yaml"},{"location":"ontap-metrics/#rw_ctx_nfs_rewinds","title":"rw_ctx_nfs_rewinds","text":"

    Array of counts of rewinds for NFS ops, categorized by rewind reason.

    API Endpoint Metric Template ZAPI perf-object-get-instances rw_ctx nfs_rewindsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/rwctx.yaml"},{"location":"ontap-metrics/#rw_ctx_qos_flowcontrol","title":"rw_ctx_qos_flowcontrol","text":"

    The number of times QoS limiting has enabled stream flowcontrol.

    API Endpoint Metric Template ZAPI perf-object-get-instances rw_ctx qos_flowcontrolUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/rwctx.yaml"},{"location":"ontap-metrics/#rw_ctx_qos_rewinds","title":"rw_ctx_qos_rewinds","text":"

    The number of restarts after a rewind because of QoS limiting.

    API Endpoint Metric Template ZAPI perf-object-get-instances rw_ctx qos_rewindsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/rwctx.yaml"},{"location":"ontap-metrics/#security_audit_destination_port","title":"security_audit_destination_port","text":"

    The destination port used to forward the message.

    API Endpoint Metric Template ZAPI cluster-log-forward-get-iter cluster-log-forward-info.port conf/zapi/cdot/9.8.0/security_audit_dest.yaml"},{"location":"ontap-metrics/#security_certificate_expiry_time","title":"security_certificate_expiry_time","text":"API Endpoint Metric Template REST api/private/cli/security/certificate expiration conf/rest/9.12.0/security_certificate.yaml ZAPI security-certificate-get-iter certificate-info.expiration-date conf/zapi/cdot/9.8.0/security_certificate.yaml"},{"location":"ontap-metrics/#security_ssh_max_instances","title":"security_ssh_max_instances","text":"
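
    For security_certificate_expiry_time above, a common use is flagging certificates that expire soon. The sketch assumes the exported value is a Unix timestamp in seconds, which is an assumption based on the metric name rather than on the source fields listed; the certificate names and the 30-day window are invented.

    ```python
    # Sketch: flag certificates expiring within 30 days. Assumes
    # security_certificate_expiry_time is exported as a Unix timestamp in
    # seconds (an assumption); the sample certificates are invented.
    import time

    SOON = 30 * 24 * 3600          # 30 days in seconds
    now = time.time()

    certificates = [
        {"name": "web-ca", "security_certificate_expiry_time": now + 10 * 24 * 3600},
        {"name": "mgmt",   "security_certificate_expiry_time": now + 200 * 24 * 3600},
    ]

    for cert in certificates:
        remaining = cert["security_certificate_expiry_time"] - now
        if remaining < SOON:
            print(f'{cert["name"]}: expires in {remaining / 86400:.1f} days')
    ```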

    Maximum possible simultaneous connections.

    API Endpoint Metric Template REST api/security/ssh max_instances conf/rest/9.12.0/security_ssh.yaml"},{"location":"ontap-metrics/#shelf_average_ambient_temperature","title":"shelf_average_ambient_temperature","text":"

    Average temperature of all ambient sensors for shelf in Celsius.

    API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_average_fan_speed","title":"shelf_average_fan_speed","text":"

    Average fan speed for shelf in rpm.

    API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_average_temperature","title":"shelf_average_temperature","text":"

    Average temperature of all non-ambient sensors for shelf in Celsius.

    API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_disk_count","title":"shelf_disk_count","text":"
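
    The shelf temperature metrics are marked "Harvest generated" because they are computed from the individual shelf sensor readings rather than read from a single ONTAP counter. Conceptually, shelf_average_temperature, shelf_min_temperature, and shelf_max_temperature are simple reductions over the non-ambient sensors, as the sketch below illustrates with invented readings.

    ```python
    # Conceptual sketch of the "Harvest generated" shelf temperature metrics:
    # reductions over the non-ambient sensor readings of one shelf (Celsius).
    # The readings below are invented.
    non_ambient_sensors_c = [34.0, 36.5, 38.0, 33.5]

    shelf_average_temperature = sum(non_ambient_sensors_c) / len(non_ambient_sensors_c)
    shelf_min_temperature = min(non_ambient_sensors_c)
    shelf_max_temperature = max(non_ambient_sensors_c)

    print(shelf_average_temperature, shelf_min_temperature, shelf_max_temperature)
    # 35.5 33.5 38.0
    ```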

    Disk count in a shelf.

    API Endpoint Metric Template REST api/storage/shelves disk_count conf/rest/9.12.0/shelf.yaml ZAPI storage-shelf-info-get-iter storage-shelf-info.disk-count conf/zapi/cdot/9.8.0/shelf.yaml"},{"location":"ontap-metrics/#shelf_max_fan_speed","title":"shelf_max_fan_speed","text":"

    Maximum fan speed for shelf in rpm.

    API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_max_temperature","title":"shelf_max_temperature","text":"

    Maximum temperature of all non-ambient sensors for shelf in Celsius.

    API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_min_ambient_temperature","title":"shelf_min_ambient_temperature","text":"

    Minimum temperature of all ambient sensors for shelf in Celsius.

    API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_min_fan_speed","title":"shelf_min_fan_speed","text":"

    Minimum fan speed for shelf in rpm.

    API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_min_temperature","title":"shelf_min_temperature","text":"

    Minimum temperature of all non-ambient sensors for shelf in Celsius.

    API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_power","title":"shelf_power","text":"

    Power consumed by shelf in Watts.

    API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#smb2_close_latency","title":"smb2_close_latency","text":"

    Average latency for SMB2_COM_CLOSE operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 close_latencyUnit: microsecType: averageBase: close_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 close_latencyUnit: microsecType: averageBase: close_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_close_latency_histogram","title":"smb2_close_latency_histogram","text":"

    Latency histogram for SMB2_COM_CLOSE operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 close_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 close_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_close_ops","title":"smb2_close_ops","text":"
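
    Type: delta for smb2_close_latency_histogram means each bucket is published as the change since the previous poll rather than as a cumulative total, which is roughly the per-bucket subtraction sketched below. The bucket labels and counts are made up.

    ```python
    # Rough illustration of a "delta"-type histogram counter: each latency
    # bucket is reported as the change since the previous poll.
    # The bucket labels and counts are invented.
    prev = {"<1ms": 1_000, "1-2ms": 400, "2-4ms": 150, ">4ms": 25}
    curr = {"<1ms": 1_180, "1-2ms": 460, "2-4ms": 165, ">4ms": 27}

    delta = {bucket: curr[bucket] - prev[bucket] for bucket in curr}
    print(delta)   # {'<1ms': 180, '1-2ms': 60, '2-4ms': 15, '>4ms': 2}
    ```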

    Number of SMB2_COM_CLOSE operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 close_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 close_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_create_latency","title":"smb2_create_latency","text":"

    Average latency for SMB2_COM_CREATE operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 create_latencyUnit: microsecType: averageBase: create_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 create_latencyUnit: microsecType: averageBase: create_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_create_latency_histogram","title":"smb2_create_latency_histogram","text":"

    Latency histogram for SMB2_COM_CREATE operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 create_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 create_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_create_ops","title":"smb2_create_ops","text":"

    Number of SMB2_COM_CREATE operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 create_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 create_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_lock_latency","title":"smb2_lock_latency","text":"

    Average latency for SMB2_COM_LOCK operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 lock_latencyUnit: microsecType: averageBase: lock_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 lock_latencyUnit: microsecType: averageBase: lock_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_lock_latency_histogram","title":"smb2_lock_latency_histogram","text":"

    Latency histogram for SMB2_COM_LOCK operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 lock_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 lock_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_lock_ops","title":"smb2_lock_ops","text":"

    Number of SMB2_COM_LOCK operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 lock_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 lock_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_negotiate_latency","title":"smb2_negotiate_latency","text":"

    Average latency for SMB2_COM_NEGOTIATE operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 negotiate_latencyUnit: microsecType: averageBase: negotiate_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 negotiate_latencyUnit: microsecType: averageBase: negotiate_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_negotiate_ops","title":"smb2_negotiate_ops","text":"

    Number of SMB2_COM_NEGOTIATE operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 negotiate_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 negotiate_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_oplock_break_latency","title":"smb2_oplock_break_latency","text":"

    Average latency for SMB2_COM_OPLOCK_BREAK operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 oplock_break_latencyUnit: microsecType: averageBase: oplock_break_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 oplock_break_latencyUnit: microsecType: averageBase: oplock_break_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_oplock_break_latency_histogram","title":"smb2_oplock_break_latency_histogram","text":"

    Latency histogram for SMB2_COM_OPLOCK_BREAK operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 oplock_break_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 oplock_break_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_oplock_break_ops","title":"smb2_oplock_break_ops","text":"

    Number of SMB2_COM_OPLOCK_BREAK operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 oplock_break_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 oplock_break_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_query_directory_latency","title":"smb2_query_directory_latency","text":"

    Average latency for SMB2_COM_QUERY_DIRECTORY operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 query_directory_latencyUnit: microsecType: averageBase: query_directory_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 query_directory_latencyUnit: microsecType: averageBase: query_directory_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_query_directory_latency_histogram","title":"smb2_query_directory_latency_histogram","text":"

    Latency histogram for SMB2_COM_QUERY_DIRECTORY operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 query_directory_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 query_directory_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_query_directory_ops","title":"smb2_query_directory_ops","text":"

    Number of SMB2_COM_QUERY_DIRECTORY operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 query_directory_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 query_directory_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_query_info_latency","title":"smb2_query_info_latency","text":"

    Average latency for SMB2_COM_QUERY_INFO operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 query_info_latencyUnit: microsecType: averageBase: query_info_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 query_info_latencyUnit: microsecType: averageBase: query_info_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_query_info_latency_histogram","title":"smb2_query_info_latency_histogram","text":"

    Latency histogram for SMB2_COM_QUERY_INFO operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 query_info_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 query_info_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_query_info_ops","title":"smb2_query_info_ops","text":"

    Number of SMB2_COM_QUERY_INFO operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 query_info_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 query_info_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_read_latency","title":"smb2_read_latency","text":"

    Average latency for SMB2_COM_READ operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_read_ops","title":"smb2_read_ops","text":"

    Number of SMB2_COM_READ operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 read_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_session_setup_latency","title":"smb2_session_setup_latency","text":"

    Average latency for SMB2_COM_SESSION_SETUP operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 session_setup_latencyUnit: microsecType: averageBase: session_setup_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 session_setup_latencyUnit: microsecType: averageBase: session_setup_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_session_setup_latency_histogram","title":"smb2_session_setup_latency_histogram","text":"

    Latency histogram for SMB2_COM_SESSION_SETUP operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 session_setup_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 session_setup_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_session_setup_ops","title":"smb2_session_setup_ops","text":"

    Number of SMB2_COM_SESSION_SETUP operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 session_setup_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 session_setup_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_set_info_latency","title":"smb2_set_info_latency","text":"

    Average latency for SMB2_COM_SET_INFO operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 set_info_latencyUnit: microsecType: averageBase: set_info_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 set_info_latencyUnit: microsecType: averageBase: set_info_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_set_info_latency_histogram","title":"smb2_set_info_latency_histogram","text":"

    Latency histogram for SMB2_COM_SET_INFO operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 set_info_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 set_info_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_set_info_ops","title":"smb2_set_info_ops","text":"

    Number of SMB2_COM_SET_INFO operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 set_info_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 set_info_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_tree_connect_latency","title":"smb2_tree_connect_latency","text":"

    Average latency for SMB2_COM_TREE_CONNECT operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 tree_connect_latencyUnit: microsecType: averageBase: tree_connect_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 tree_connect_latencyUnit: microsecType: averageBase: tree_connect_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_tree_connect_ops","title":"smb2_tree_connect_ops","text":"

    Number of SMB2_COM_TREE_CONNECT operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 tree_connect_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 tree_connect_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_write_latency","title":"smb2_write_latency","text":"

    Average latency for SMB2_COM_WRITE operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 write_latencyUnit: microsecType: averageBase: write_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_write_ops","title":"smb2_write_ops","text":"

    Number of SMB2_COM_WRITE operations

    API Endpoint Metric Template REST api/cluster/counter/tables/smb2 write_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#snapmirror_break_failed_count","title":"snapmirror_break_failed_count","text":"

    The number of failed SnapMirror break operations for the relationship

    API Endpoint Metric Template REST api/private/cli/snapmirror break_failed_count conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.break-failed-count conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_break_successful_count","title":"snapmirror_break_successful_count","text":"

    The number of successful SnapMirror break operations for the relationship

    API Endpoint Metric Template REST api/private/cli/snapmirror break_successful_count conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.break-successful-count conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_lag_time","title":"snapmirror_lag_time","text":"

    Amount of time since the last snapmirror transfer in seconds

    API Endpoint Metric Template REST api/private/cli/snapmirror lag_time conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.lag-time conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_last_transfer_duration","title":"snapmirror_last_transfer_duration","text":"
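
    Because snapmirror_lag_time is already expressed in seconds, comparing it against a recovery-point objective is a direct threshold check. A minimal sketch with invented relationships and a hypothetical one-hour RPO:

    ```python
    # Sketch: flag SnapMirror relationships whose lag exceeds an RPO threshold.
    # The relationships and the one-hour RPO are invented examples.
    RPO_SECONDS = 3600

    relationships = [
        {"destination": "svm1:vol_dr",  "snapmirror_lag_time": 540},
        {"destination": "svm2:vol2_dr", "snapmirror_lag_time": 7_800},
    ]

    for rel in relationships:
        if rel["snapmirror_lag_time"] > RPO_SECONDS:
            hours = rel["snapmirror_lag_time"] / 3600
            print(f'{rel["destination"]}: lag {hours:.1f}h exceeds RPO')
    ```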

    Duration of the last SnapMirror transfer in seconds

    API Endpoint Metric Template REST api/private/cli/snapmirror last_transfer_duration conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.last-transfer-duration conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_last_transfer_end_timestamp","title":"snapmirror_last_transfer_end_timestamp","text":"

    The timestamp of the end of the last transfer

    API Endpoint Metric Template REST api/private/cli/snapmirror last_transfer_end_timestamp conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.last-transfer-end-timestamp conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_last_transfer_size","title":"snapmirror_last_transfer_size","text":"

    Size in kilobytes (1024 bytes) of the last transfer

    API Endpoint Metric Template REST api/private/cli/snapmirror last_transfer_size conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.last-transfer-size conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_newest_snapshot_timestamp","title":"snapmirror_newest_snapshot_timestamp","text":"

    The timestamp of the newest Snapshot copy on the destination volume

    API Endpoint Metric Template REST api/private/cli/snapmirror newest_snapshot_timestamp conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.newest-snapshot-timestamp conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_resync_failed_count","title":"snapmirror_resync_failed_count","text":"

    The number of failed SnapMirror resync operations for the relationship

    API Endpoint Metric Template REST api/private/cli/snapmirror resync_failed_count conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.resync-failed-count conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_resync_successful_count","title":"snapmirror_resync_successful_count","text":"

    The number of successful SnapMirror resync operations for the relationship

    API Endpoint Metric Template REST api/private/cli/snapmirror resync_successful_count conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.resync-successful-count conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_total_transfer_bytes","title":"snapmirror_total_transfer_bytes","text":"

    Cumulative bytes transferred for the relationship

    API Endpoint Metric Template REST api/private/cli/snapmirror total_transfer_bytes conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.total-transfer-bytes conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_total_transfer_time_secs","title":"snapmirror_total_transfer_time_secs","text":"

    Cumulative total transfer time in seconds for the relationship

    API Endpoint Metric Template REST api/private/cli/snapmirror total_transfer_time_secs conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.total-transfer-time-secs conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_update_failed_count","title":"snapmirror_update_failed_count","text":"

    The number of failed SnapMirror update operations for the relationship

    API Endpoint Metric Template REST api/private/cli/snapmirror update_failed_count conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.update-failed-count conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_update_successful_count","title":"snapmirror_update_successful_count","text":"

    The number of successful SnapMirror update operations for the relationship

    API Endpoint Metric Template REST api/private/cli/snapmirror update_successful_count conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.update-successful-count conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapshot_policy_total_schedules","title":"snapshot_policy_total_schedules","text":"
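
    Since failed and successful update counts are exposed as separate cumulative counters, an update success ratio for a relationship is a simple division, guarding against the zero-update case. An illustration with invented counts:

    ```python
    # Sketch: derive an update success ratio from the cumulative counters
    # snapmirror_update_successful_count and snapmirror_update_failed_count.
    # The counts are invented.
    successful = 473
    failed = 2

    total = successful + failed
    ratio = successful / total if total else None
    print(f"update success ratio: {ratio:.4f}" if ratio is not None else "no updates yet")
    ```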

    Total Number of Schedules in this Policy

    API Endpoint Metric Template REST api/private/cli/snapshot/policy total_schedules conf/rest/9.12.0/snapshotpolicy.yaml ZAPI snapshot-policy-get-iter snapshot-policy-info.total-schedules conf/zapi/cdot/9.8.0/snapshotpolicy.yaml"},{"location":"ontap-metrics/#svm_cifs_connections","title":"svm_cifs_connections","text":"

    Number of connections

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs connectionsUnit: noneType: rawBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver connectionsUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_established_sessions","title":"svm_cifs_established_sessions","text":"

    Number of established SMB and SMB2 sessions

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs established_sessionsUnit: noneType: rawBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver established_sessionsUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_latency","title":"svm_cifs_latency","text":"

    Average latency for CIFS operations

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs latencyUnit: microsecType: averageBase: latency_base conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver cifs_latencyUnit: microsecType: averageBase: cifs_latency_base conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_op_count","title":"svm_cifs_op_count","text":"

    Array of select CIFS operation counts

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs op_countUnit: noneType: rateBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver cifs_op_countUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_open_files","title":"svm_cifs_open_files","text":"

    Number of open files over SMB and SMB2

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs open_filesUnit: noneType: rawBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver open_filesUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_ops","title":"svm_cifs_ops","text":"

    Total number of CIFS operations

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver cifs_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_read_latency","title":"svm_cifs_read_latency","text":"

    Average latency for CIFS read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs average_read_latencyUnit: microsecType: averageBase: total_read_ops conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver cifs_read_latencyUnit: microsecType: averageBase: cifs_read_ops conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_read_ops","title":"svm_cifs_read_ops","text":"

    Total number of CIFS read operations

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs total_read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver cifs_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_signed_sessions","title":"svm_cifs_signed_sessions","text":"

    Number of signed SMB and SMB2 sessions.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs signed_sessionsUnit: noneType: rawBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver signed_sessionsUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_write_latency","title":"svm_cifs_write_latency","text":"

    Average latency for CIFS write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs average_write_latencyUnit: microsecType: averageBase: total_write_ops conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver cifs_write_latencyUnit: microsecType: averageBase: cifs_write_ops conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_write_ops","title":"svm_cifs_write_ops","text":"

    Total number of CIFS write operations

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs total_write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver cifs_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_nfs_access_avg_latency","title":"svm_nfs_access_avg_latency","text":"

    Average latency of Access procedure requests. The counter keeps track of the average response time of Access requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_access_total","title":"svm_nfs_access_total","text":"

    Total number of Access procedure requests. It is the total number of access success and access error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_backchannel_ctl_avg_latency","title":"svm_nfs_backchannel_ctl_avg_latency","text":"

    Average latency of BACKCHANNEL_CTL operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 backchannel_ctl.average_latencyUnit: microsecType: averageBase: backchannel_ctl.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 backchannel_ctl.average_latencyUnit: microsecType: averageBase: backchannel_ctl.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 backchannel_ctl_avg_latencyUnit: microsecType: average,no-zero-valuesBase: backchannel_ctl_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 backchannel_ctl_avg_latencyUnit: microsecType: average,no-zero-valuesBase: backchannel_ctl_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_backchannel_ctl_total","title":"svm_nfs_backchannel_ctl_total","text":"

    Total number of BACKCHANNEL_CTL operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 backchannel_ctl.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 backchannel_ctl.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 backchannel_ctl_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 backchannel_ctl_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_bind_conn_to_session_avg_latency","title":"svm_nfs_bind_conn_to_session_avg_latency","text":"

    Average latency of BIND_CONN_TO_SESSION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 bind_connections_to_session.average_latencyUnit: microsecType: averageBase: bind_connections_to_session.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 bind_conn_to_session.average_latencyUnit: microsecType: averageBase: bind_conn_to_session.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 bind_conn_to_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: bind_conn_to_session_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 bind_conn_to_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: bind_conn_to_session_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_bind_conn_to_session_total","title":"svm_nfs_bind_conn_to_session_total","text":"

    Total number of BIND_CONN_TO_SESSION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 bind_connections_to_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 bind_conn_to_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 bind_conn_to_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 bind_conn_to_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_close_avg_latency","title":"svm_nfs_close_avg_latency","text":"

    Average latency of CLOSE procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 close.average_latencyUnit: microsecType: averageBase: close.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 close.average_latencyUnit: microsecType: averageBase: close.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 close.average_latencyUnit: microsecType: averageBase: close.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 close_avg_latencyUnit: microsecType: average,no-zero-valuesBase: close_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 close_avg_latencyUnit: microsecType: average,no-zero-valuesBase: close_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 close_avg_latencyUnit: microsecType: average,no-zero-valuesBase: close_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_close_total","title":"svm_nfs_close_total","text":"

    Total number of CLOSE procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 close.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 close.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 close.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 close_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 close_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 close_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_commit_avg_latency","title":"svm_nfs_commit_avg_latency","text":"

    Average latency of Commit procedure requests. The counter keeps track of the average response time of Commit requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_commit_total","title":"svm_nfs_commit_total","text":"

    Total number of Commit procedure requests. It is the total number of Commit success and Commit error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_create_avg_latency","title":"svm_nfs_create_avg_latency","text":"

    Average latency of Create procedure requests. The counter keeps track of the average response time of Create requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_create_session_avg_latency","title":"svm_nfs_create_session_avg_latency","text":"

    Average latency of CREATE_SESSION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 create_session.average_latencyUnit: microsecType: averageBase: create_session.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 create_session.average_latencyUnit: microsecType: averageBase: create_session.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 create_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_session_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 create_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_session_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_create_session_total","title":"svm_nfs_create_session_total","text":"

    Total number of CREATE_SESSION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 create_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 create_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 create_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 create_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_create_total","title":"svm_nfs_create_total","text":"

    Total number of Create procedure requests. It is the total number of Create success and Create error requests.
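
    Counters with Type: rate, such as create.total in the table below, are cumulative counts that are typically converted into a per-second rate between two polls. A small illustrative sketch with hypothetical values (not Harvest code):

    ```python
    # Illustrative only: converting a cumulative "rate"-type counter into
    # operations per second between two polls.
    def per_second_rate(prev_count, curr_count, interval_seconds):
        if interval_seconds <= 0:
            raise ValueError("poll interval must be positive")
        return max(curr_count - prev_count, 0) / interval_seconds

    print(per_second_rate(10_000, 10_600, 60))  # 10.0 Create requests per second
    ```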

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_delegpurge_avg_latency","title":"svm_nfs_delegpurge_avg_latency","text":"

    Average latency of DELEGPURGE procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 delegpurge.average_latencyUnit: microsecType: averageBase: delegpurge.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 delegpurge.average_latencyUnit: microsecType: averageBase: delegpurge.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 delegpurge.average_latencyUnit: microsecType: averageBase: delegpurge.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 delegpurge_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegpurge_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 delegpurge_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegpurge_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 delegpurge_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegpurge_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_delegpurge_total","title":"svm_nfs_delegpurge_total","text":"

    Total number of DELEGPURGE procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 delegpurge.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 delegpurge.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 delegpurge.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 delegpurge_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 delegpurge_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 delegpurge_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_delegreturn_avg_latency","title":"svm_nfs_delegreturn_avg_latency","text":"

    Average latency of DELEGRETURN procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 delegreturn.average_latencyUnit: microsecType: averageBase: delegreturn.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 delegreturn.average_latencyUnit: microsecType: averageBase: delegreturn.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 delegreturn.average_latencyUnit: microsecType: averageBase: delegreturn.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 delegreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegreturn_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 delegreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegreturn_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 delegreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegreturn_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_delegreturn_total","title":"svm_nfs_delegreturn_total","text":"

    Total number of DELEGRETURN procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 delegreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 delegreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 delegreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 delegreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 delegreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 delegreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_destroy_clientid_avg_latency","title":"svm_nfs_destroy_clientid_avg_latency","text":"

    Average latency of DESTROY_CLIENTID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 destroy_clientid.average_latencyUnit: microsecType: averageBase: destroy_clientid.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 destroy_clientid.average_latencyUnit: microsecType: averageBase: destroy_clientid.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 destroy_clientid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_clientid_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 destroy_clientid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_clientid_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_destroy_clientid_total","title":"svm_nfs_destroy_clientid_total","text":"

    Total number of DESTROY_CLIENTID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 destroy_clientid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 destroy_clientid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 destroy_clientid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 destroy_clientid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_destroy_session_avg_latency","title":"svm_nfs_destroy_session_avg_latency","text":"

    Average latency of DESTROY_SESSION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 destroy_session.average_latencyUnit: microsecType: averageBase: destroy_session.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 destroy_session.average_latencyUnit: microsecType: averageBase: destroy_session.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 destroy_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_session_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 destroy_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_session_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_destroy_session_total","title":"svm_nfs_destroy_session_total","text":"

    Total number of DESTROY_SESSION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 destroy_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 destroy_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 destroy_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 destroy_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_exchange_id_avg_latency","title":"svm_nfs_exchange_id_avg_latency","text":"

    Average latency of EXCHANGE_ID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 exchange_id.average_latencyUnit: microsecType: averageBase: exchange_id.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 exchange_id.average_latencyUnit: microsecType: averageBase: exchange_id.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 exchange_id_avg_latencyUnit: microsecType: average,no-zero-valuesBase: exchange_id_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 exchange_id_avg_latencyUnit: microsecType: average,no-zero-valuesBase: exchange_id_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_exchange_id_total","title":"svm_nfs_exchange_id_total","text":"

    Total number of EXCHANGE_ID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 exchange_id.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 exchange_id.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 exchange_id_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 exchange_id_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_free_stateid_avg_latency","title":"svm_nfs_free_stateid_avg_latency","text":"

    Average latency of FREE_STATEID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 free_stateid.average_latencyUnit: microsecType: averageBase: free_stateid.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 free_stateid.average_latencyUnit: microsecType: averageBase: free_stateid.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 free_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: free_stateid_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 free_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: free_stateid_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_free_stateid_total","title":"svm_nfs_free_stateid_total","text":"

    Total number of FREE_STATEID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 free_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 free_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 free_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 free_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_fsinfo_avg_latency","title":"svm_nfs_fsinfo_avg_latency","text":"

    Average latency of FSInfo procedure requests. The counter keeps track of the average response time of FSInfo requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 fsinfo.average_latencyUnit: microsecType: averageBase: fsinfo.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 fsinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: fsinfo_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_fsinfo_total","title":"svm_nfs_fsinfo_total","text":"

    Total number of FSInfo procedure requests. It is the total number of FSInfo success and FSInfo error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 fsinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 fsinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_fsstat_avg_latency","title":"svm_nfs_fsstat_avg_latency","text":"

    Average latency of FSStat procedure requests. The counter keeps track of the average response time of FSStat requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 fsstat.average_latencyUnit: microsecType: averageBase: fsstat.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 fsstat_avg_latencyUnit: microsecType: average,no-zero-valuesBase: fsstat_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_fsstat_total","title":"svm_nfs_fsstat_total","text":"

    Total number of FSStat procedure requests. It is the total number of FSStat success and FSStat error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 fsstat.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 fsstat_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_get_dir_delegation_avg_latency","title":"svm_nfs_get_dir_delegation_avg_latency","text":"

    Average latency of GET_DIR_DELEGATION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 get_dir_delegation.average_latencyUnit: microsecType: averageBase: get_dir_delegation.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 get_dir_delegation.average_latencyUnit: microsecType: averageBase: get_dir_delegation.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 get_dir_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: get_dir_delegation_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 get_dir_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: get_dir_delegation_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_get_dir_delegation_total","title":"svm_nfs_get_dir_delegation_total","text":"

    Total number of GET_DIR_DELEGATION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 get_dir_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 get_dir_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 get_dir_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 get_dir_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getattr_avg_latency","title":"svm_nfs_getattr_avg_latency","text":"

    Average latency of GetAttr procedure requests. This counter keeps track of the average response time of GetAttr requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getattr_total","title":"svm_nfs_getattr_total","text":"

    Total number of Getattr procedure requests. It is the total number of getattr success and getattr error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getdeviceinfo_avg_latency","title":"svm_nfs_getdeviceinfo_avg_latency","text":"

    Average latency of GETDEVICEINFO operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 getdeviceinfo.average_latencyUnit: microsecType: averageBase: getdeviceinfo.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getdeviceinfo.average_latencyUnit: microsecType: averageBase: getdeviceinfo.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 getdeviceinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdeviceinfo_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getdeviceinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdeviceinfo_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getdeviceinfo_total","title":"svm_nfs_getdeviceinfo_total","text":"

    Total number of GETDEVICEINFO operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 getdeviceinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getdeviceinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 getdeviceinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getdeviceinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getdevicelist_avg_latency","title":"svm_nfs_getdevicelist_avg_latency","text":"

    Average latency of GETDEVICELIST operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 getdevicelist.average_latencyUnit: microsecType: averageBase: getdevicelist.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getdevicelist.average_latencyUnit: microsecType: averageBase: getdevicelist.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 getdevicelist_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdevicelist_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getdevicelist_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdevicelist_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getdevicelist_total","title":"svm_nfs_getdevicelist_total","text":"

    Total number of GETDEVICELIST operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 getdevicelist.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getdevicelist.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 getdevicelist_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getdevicelist_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getfh_avg_latency","title":"svm_nfs_getfh_avg_latency","text":"

    Average latency of GETFH procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 getfh.average_latencyUnit: microsecType: averageBase: getfh.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 getfh.average_latencyUnit: microsecType: averageBase: getfh.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getfh.average_latencyUnit: microsecType: averageBase: getfh.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 getfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getfh_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 getfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getfh_total","title":"svm_nfs_getfh_total","text":"

    Total number of GETFH procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 getfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 getfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 getfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 getfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_latency","title":"svm_nfs_latency","text":"

    Average latency of NFS requests. This counter keeps track of the average response time of NFS requests for the protocol version of the underlying counter table (NFSv3, NFSv4, NFSv4.1, or NFSv4.2).
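
    When Harvest exports these counters to Prometheus, the metric is published under the name svm_nfs_latency and can be read back through the Prometheus HTTP API. The sketch below assumes a Prometheus server reachable at http://localhost:9090 and an "svm" label on the series; the URL and label name are assumptions for illustration, not taken from this table.

    ```python
    # Illustrative only: fetch svm_nfs_latency from a Prometheus server and
    # print the average latency per SVM, averaged across protocol versions.
    import json
    import urllib.parse
    import urllib.request

    def query_prometheus(promql, base_url="http://localhost:9090"):
        url = f"{base_url}/api/v1/query?" + urllib.parse.urlencode({"query": promql})
        with urllib.request.urlopen(url) as resp:
            return json.load(resp)

    result = query_prometheus("avg by (svm) (svm_nfs_latency)")
    for sample in result["data"]["result"]:
        print(sample["metric"].get("svm", "<unknown>"), sample["value"][1], "microsec")
    ```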

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_layoutcommit_avg_latency","title":"svm_nfs_layoutcommit_avg_latency","text":"

    Average latency of LAYOUTCOMMIT operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 layoutcommit.average_latencyUnit: microsecType: averageBase: layoutcommit.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 layoutcommit.average_latencyUnit: microsecType: averageBase: layoutcommit.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 layoutcommit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutcommit_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 layoutcommit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutcommit_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_layoutcommit_total","title":"svm_nfs_layoutcommit_total","text":"

    Total number of LAYOUTCOMMIT operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 layoutcommit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 layoutcommit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 layoutcommit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 layoutcommit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_layoutget_avg_latency","title":"svm_nfs_layoutget_avg_latency","text":"

    Average latency of LAYOUTGET operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 layoutget.average_latencyUnit: microsecType: averageBase: layoutget.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 layoutget.average_latencyUnit: microsecType: averageBase: layoutget.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 layoutget_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutget_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 layoutget_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutget_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_layoutget_total","title":"svm_nfs_layoutget_total","text":"

    Total number of LAYOUTGET operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 layoutget.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 layoutget.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 layoutget_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 layoutget_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_layoutreturn_avg_latency","title":"svm_nfs_layoutreturn_avg_latency","text":"

    Average latency of LAYOUTRETURN operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 layoutreturn.average_latencyUnit: microsecType: averageBase: layoutreturn.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 layoutreturn.average_latencyUnit: microsecType: averageBase: layoutreturn.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 layoutreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutreturn_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 layoutreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutreturn_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_layoutreturn_total","title":"svm_nfs_layoutreturn_total","text":"

    Total number of LAYOUTRETURN operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 layoutreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 layoutreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 layoutreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 layoutreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_link_avg_latency","title":"svm_nfs_link_avg_latency","text":"

    Average latency of Link procedure requests. The counter keeps track of the average response time of Link requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_link_total","title":"svm_nfs_link_total","text":"

    Total number of Link procedure requests. It is the total number of Link success and Link error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lock_avg_latency","title":"svm_nfs_lock_avg_latency","text":"

    Average latency of LOCK procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 lock.average_latencyUnit: microsecType: averageBase: lock.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lock.average_latencyUnit: microsecType: averageBase: lock.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lock.average_latencyUnit: microsecType: averageBase: lock.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 lock_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lock_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lock_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lock_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lock_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lock_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lock_total","title":"svm_nfs_lock_total","text":"

    Total number of LOCK procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 lock.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lock.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lock.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 lock_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lock_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lock_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lockt_avg_latency","title":"svm_nfs_lockt_avg_latency","text":"

    Average latency of LOCKT procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 lockt.average_latencyUnit: microsecType: averageBase: lockt.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lockt.average_latencyUnit: microsecType: averageBase: lockt.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lockt.average_latencyUnit: microsecType: averageBase: lockt.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 lockt_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lockt_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lockt_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lockt_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lockt_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lockt_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lockt_total","title":"svm_nfs_lockt_total","text":"

    Total number of LOCKT procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 lockt.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lockt.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lockt.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 lockt_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lockt_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lockt_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_locku_avg_latency","title":"svm_nfs_locku_avg_latency","text":"

    Average latency of LOCKU procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 locku.average_latencyUnit: microsecType: averageBase: locku.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 locku.average_latencyUnit: microsecType: averageBase: locku.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 locku.average_latencyUnit: microsecType: averageBase: locku.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 locku_avg_latencyUnit: microsecType: average,no-zero-valuesBase: locku_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 locku_avg_latencyUnit: microsecType: average,no-zero-valuesBase: locku_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 locku_avg_latencyUnit: microsecType: average,no-zero-valuesBase: locku_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_locku_total","title":"svm_nfs_locku_total","text":"

    Total number of LOCKU procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 locku.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 locku.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 locku.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 locku_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 locku_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 locku_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lookup_avg_latency","title":"svm_nfs_lookup_avg_latency","text":"

    Average latency of LookUp procedure requests. This shows the average time it takes for the LookUp operation to reply to the request.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lookup_total","title":"svm_nfs_lookup_total","text":"

    Total number of Lookup procedure requests. It is the total number of lookup success and lookup error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lookupp_avg_latency","title":"svm_nfs_lookupp_avg_latency","text":"

    Average latency of LOOKUPP procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 lookupp.average_latencyUnit: microsecType: averageBase: lookupp.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lookupp.average_latencyUnit: microsecType: averageBase: lookupp.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lookupp.average_latencyUnit: microsecType: averageBase: lookupp.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 lookupp_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookupp_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lookupp_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookupp_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lookupp_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookupp_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lookupp_total","title":"svm_nfs_lookupp_total","text":"

    Total number of LOOKUPP procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 lookupp.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lookupp.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lookupp.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 lookupp_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lookupp_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lookupp_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_mkdir_avg_latency","title":"svm_nfs_mkdir_avg_latency","text":"

    Average latency of MkDir procedure requests. The counter keeps track of the average response time of MkDir requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 mkdir.average_latencyUnit: microsecType: averageBase: mkdir.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 mkdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: mkdir_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_mkdir_total","title":"svm_nfs_mkdir_total","text":"

    Total number of MkDir procedure requests. It is the total number of MkDir success and MkDir error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 mkdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 mkdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_mknod_avg_latency","title":"svm_nfs_mknod_avg_latency","text":"

    Average latency of MkNod procedure requests. The counter keeps track of the average response time of MkNod requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 mknod.average_latencyUnit: microsecType: averageBase: mknod.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 mknod_avg_latencyUnit: microsecType: average,no-zero-valuesBase: mknod_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_mknod_total","title":"svm_nfs_mknod_total","text":"

    Total number of MkNod procedure requests. It is the total number of MkNod success and MkNod error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 mknod.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 mknod_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_null_avg_latency","title":"svm_nfs_null_avg_latency","text":"

    Average latency of Null procedure requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_null_total","title":"svm_nfs_null_total","text":"

    Total number of Null procedure requests. It is the total of null success and null error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_nverify_avg_latency","title":"svm_nfs_nverify_avg_latency","text":"

    Average latency of NVERIFY procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 nverify.average_latencyUnit: microsecType: averageBase: nverify.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 nverify.average_latencyUnit: microsecType: averageBase: nverify.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 nverify.average_latencyUnit: microsecType: averageBase: nverify.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 nverify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: nverify_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 nverify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: nverify_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 nverify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: nverify_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_nverify_total","title":"svm_nfs_nverify_total","text":"

    Total number of NVERIFY procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 nverify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 nverify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 nverify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 nverify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 nverify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 nverify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_open_avg_latency","title":"svm_nfs_open_avg_latency","text":"

    Average latency of OPEN procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 open.average_latencyUnit: microsecType: averageBase: open.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 open.average_latencyUnit: microsecType: averageBase: open.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 open.average_latencyUnit: microsecType: averageBase: open.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 open_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 open_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 open_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_open_confirm_avg_latency","title":"svm_nfs_open_confirm_avg_latency","text":"

    Average latency of OPEN_CONFIRM procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 open_confirm.average_latencyUnit: microsecType: averageBase: open_confirm.total conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 open_confirm_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_confirm_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_open_confirm_total","title":"svm_nfs_open_confirm_total","text":"

    Total number of OPEN_CONFIRM procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 open_confirm.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 open_confirm_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_open_downgrade_avg_latency","title":"svm_nfs_open_downgrade_avg_latency","text":"

    Average latency of OPEN_DOWNGRADE procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 open_downgrade.average_latencyUnit: microsecType: averageBase: open_downgrade.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 open_downgrade.average_latencyUnit: microsecType: averageBase: open_downgrade.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 open_downgrade.average_latencyUnit: microsecType: averageBase: open_downgrade.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 open_downgrade_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_downgrade_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 open_downgrade_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_downgrade_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 open_downgrade_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_downgrade_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_open_downgrade_total","title":"svm_nfs_open_downgrade_total","text":"

    Total number of OPEN_DOWNGRADE procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 open_downgrade.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 open_downgrade.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 open_downgrade.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 open_downgrade_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 open_downgrade_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 open_downgrade_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_open_total","title":"svm_nfs_open_total","text":"

    Total number of OPEN procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 open.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 open.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 open.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 open_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 open_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 open_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_openattr_avg_latency","title":"svm_nfs_openattr_avg_latency","text":"

    Average latency of OPENATTR procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 openattr.average_latencyUnit: microsecType: averageBase: openattr.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 openattr.average_latencyUnit: microsecType: averageBase: openattr.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 openattr.average_latencyUnit: microsecType: averageBase: openattr.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 openattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: openattr_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 openattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: openattr_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 openattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: openattr_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_openattr_total","title":"svm_nfs_openattr_total","text":"

    Total number of OPENATTR procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 openattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 openattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 openattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 openattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 openattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 openattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_ops","title":"svm_nfs_ops","text":"

    Total number of NFS procedure requests per second, reported per protocol version (NFSv3, NFSv4, NFSv4.1, NFSv4.2).
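
    Because svm_nfs_ops is reported per protocol version, a single ops-weighted latency figure for an SVM can be built by combining it with svm_nfs_latency. A small illustrative calculation with made-up sample values (not Harvest code):

    ```python
    # Illustrative only: combine per-version throughput (ops/s) and average
    # latency (microsec) into one ops-weighted average latency for an SVM.
    samples = [
        # (protocol, ops_per_sec, avg_latency_microsec) -- hypothetical values
        ("nfsv3",   1500.0, 450.0),
        ("nfsv4.1",  300.0, 900.0),
    ]

    total_ops = sum(ops for _, ops, _ in samples)
    weighted_latency = (
        sum(ops * lat for _, ops, lat in samples) / total_ops if total_ops else 0.0
    )
    print(f"{weighted_latency:.1f} microsec across {total_ops:.0f} ops/s")  # 525.0 microsec across 1800 ops/s
    ```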

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 nfsv3_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 total_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 total_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 total_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_pathconf_avg_latency","title":"svm_nfs_pathconf_avg_latency","text":"

    Average latency of PathConf procedure requests. The counter keeps track of the average response time of PathConf requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 pathconf.average_latencyUnit: microsecType: averageBase: pathconf.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 pathconf_avg_latencyUnit: microsecType: average,no-zero-valuesBase: pathconf_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_pathconf_total","title":"svm_nfs_pathconf_total","text":"

    Total number of PathConf procedure requests. It is the total number of PathConf success and PathConf error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 pathconf.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 pathconf_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_putfh_avg_latency","title":"svm_nfs_putfh_avg_latency","text":"

    Average latency of PUTFH procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 putfh.average_latencyUnit: microsecType: averageBase: putfh.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 putfh.average_latencyUnit: noneType: deltaBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 putfh.average_latencyUnit: microsecType: averageBase: putfh.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 putfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putfh_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 putfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 putfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_putfh_total","title":"svm_nfs_putfh_total","text":"

    Total number of PUTFH procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 putfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 putfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 putfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 putfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 putfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 putfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_putpubfh_avg_latency","title":"svm_nfs_putpubfh_avg_latency","text":"

    Average latency of PUTPUBFH procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 putpubfh.average_latencyUnit: microsecType: averageBase: putpubfh.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 putpubfh.average_latencyUnit: microsecType: averageBase: putpubfh.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 putpubfh.average_latencyUnit: microsecType: averageBase: putpubfh.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 putpubfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putpubfh_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 putpubfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putpubfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 putpubfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putpubfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_putpubfh_total","title":"svm_nfs_putpubfh_total","text":"

    Total number of PUTPUBFH procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 putpubfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 putpubfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 putpubfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 putpubfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 putpubfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 putpubfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_putrootfh_avg_latency","title":"svm_nfs_putrootfh_avg_latency","text":"

    Average latency of PUTROOTFH procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 putrootfh.average_latencyUnit: microsecType: averageBase: putrootfh.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 putrootfh.average_latencyUnit: microsecType: averageBase: putrootfh.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 putrootfh.average_latencyUnit: microsecType: averageBase: putrootfh.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 putrootfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putrootfh_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 putrootfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putrootfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 putrootfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putrootfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_putrootfh_total","title":"svm_nfs_putrootfh_total","text":"

    Total number of PUTROOTFH procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 putrootfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 putrootfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 putrootfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 putrootfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 putrootfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 putrootfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_read_avg_latency","title":"svm_nfs_read_avg_latency","text":"

    Average latency of Read procedure requests. The counter keeps track of the average response time of Read requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_read_ops","title":"svm_nfs_read_ops","text":"
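    The read.average_latency counter above is listed as Type: average with Base: read.total. A counter of that shape is normally a cumulative sum that only becomes an average once it is divided by the delta of its base counter between two polls; the sketch below illustrates that derivation with made-up sample values and is not Harvest's actual implementation.

    ```python
    # Minimal sketch of how a "Type: average" counter with a "Base:" counter is
    # usually derived: both counters are cumulative, so the average over a poll
    # interval is the delta of the latency sum divided by the delta of the base
    # (here read.total). The sample values below are made up for illustration.

    def average_between_polls(latency_prev, latency_curr, base_prev, base_curr):
        """Return average latency (microseconds) for the interval, or None if idle."""
        ops = base_curr - base_prev
        if ops <= 0:
            return None  # no read requests in the interval; nothing to average
        return (latency_curr - latency_prev) / ops

    # e.g. 1,500,000 us accumulated over 3,000 reads during the poll -> 500.0 us
    print(average_between_polls(10_000_000, 11_500_000, 20_000, 23_000))
    ```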

    Total observed NFSv3 read operations per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 nfsv3_read_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_read_symlink_avg_latency","title":"svm_nfs_read_symlink_avg_latency","text":"

    Average latency of ReadSymLink procedure requests. The counter keeps track of the average response time of ReadSymLink requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 read_symlink.average_latencyUnit: microsecType: averageBase: read_symlink.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 read_symlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_symlink_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_read_symlink_total","title":"svm_nfs_read_symlink_total","text":"

    Total number of ReadSymLink procedure requests. It is the total number of read symlink success and read symlink error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 read_symlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 read_symlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_read_throughput","title":"svm_nfs_read_throughput","text":"

    Rate of NFSv3 read data transfers per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 total.read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 total.read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 total.read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 nfsv3_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 nfs4_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 nfs41_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 nfs42_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_read_total","title":"svm_nfs_read_total","text":"
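    read_throughput is listed as Type: rate with Unit: b_per_sec, i.e. a per-second value that is typically derived from a monotonically increasing byte counter by dividing the delta between two polls by the elapsed time. A minimal sketch of that derivation, using hypothetical sample values:

    ```python
    # Minimal sketch of how a "Type: rate" counter such as read_throughput
    # (Unit: b_per_sec) is typically produced from a monotonically increasing raw
    # counter: divide the delta between two polls by the elapsed seconds.
    # The sample values are hypothetical.

    def rate_between_polls(value_prev, value_curr, seconds_elapsed):
        """Return a per-second rate for the interval (bytes/s for throughput counters)."""
        if seconds_elapsed <= 0:
            raise ValueError("elapsed time must be positive")
        return (value_curr - value_prev) / seconds_elapsed

    # e.g. 600 MiB read over a 60 s poll interval -> 10 MiB/s
    print(rate_between_polls(0, 600 * 1024 * 1024, 60))
    ```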

    Total number of Read procedure requests. It is the total number of read success and read error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_readdir_avg_latency","title":"svm_nfs_readdir_avg_latency","text":"

    Average latency of ReadDir procedure requests. The counter keeps track of the average response time of ReadDir requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_readdir_total","title":"svm_nfs_readdir_total","text":"

    Total number of ReadDir procedure requests. It is the total number of ReadDir success and ReadDir error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_readdirplus_avg_latency","title":"svm_nfs_readdirplus_avg_latency","text":"

    Average latency of ReadDirPlus procedure requests. The counter keeps track of the average response time of ReadDirPlus requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 readdirplus.average_latencyUnit: microsecType: averageBase: readdirplus.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 readdirplus_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdirplus_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_readdirplus_total","title":"svm_nfs_readdirplus_total","text":"

    Total number of ReadDirPlus procedure requests. It is the total number of ReadDirPlus success and ReadDirPlus error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 readdirplus.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 readdirplus_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_readlink_avg_latency","title":"svm_nfs_readlink_avg_latency","text":"

    Average latency of READLINK procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 readlink.average_latencyUnit: microsecType: averageBase: readlink.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 readlink.average_latencyUnit: microsecType: averageBase: readlink.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 readlink.average_latencyUnit: microsecType: averageBase: readlink.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 readlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readlink_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 readlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readlink_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 readlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readlink_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_readlink_total","title":"svm_nfs_readlink_total","text":"

    Total number of READLINK procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 readlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 readlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 readlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 readlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 readlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 readlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_reclaim_complete_avg_latency","title":"svm_nfs_reclaim_complete_avg_latency","text":"

    Average latency of RECLAIM_COMPLETE operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 reclaim_complete.average_latencyUnit: microsecType: averageBase: reclaim_complete.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 reclaim_complete.average_latencyUnit: microsecType: averageBase: reclaim_complete.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 reclaim_complete_avg_latencyUnit: microsecType: average,no-zero-valuesBase: reclaim_complete_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 reclaim_complete_avg_latencyUnit: microsecType: average,no-zero-valuesBase: reclaim_complete_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_reclaim_complete_total","title":"svm_nfs_reclaim_complete_total","text":"

    Total number of RECLAIM_COMPLETE operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 reclaim_complete.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 reclaim_complete.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 reclaim_complete_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 reclaim_complete_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_release_lock_owner_avg_latency","title":"svm_nfs_release_lock_owner_avg_latency","text":"

    Average latency of RELEASE_LOCKOWNER procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 release_lock_owner.average_latencyUnit: microsecType: averageBase: release_lock_owner.total conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 release_lock_owner_avg_latencyUnit: microsecType: average,no-zero-valuesBase: release_lock_owner_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_release_lock_owner_total","title":"svm_nfs_release_lock_owner_total","text":"

    Total number of RELEASE_LOCKOWNER procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 release_lock_owner.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 release_lock_owner_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_remove_avg_latency","title":"svm_nfs_remove_avg_latency","text":"

    Average latency of Remove procedure requests. The counter keeps track of the average response time of Remove requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_remove_total","title":"svm_nfs_remove_total","text":"

    Total number of Remove procedure requests. It is the total number of Remove success and Remove error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_rename_avg_latency","title":"svm_nfs_rename_avg_latency","text":"

    Average latency of Rename procedure requests. The counter keeps track of the average response time of Rename requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_rename_total","title":"svm_nfs_rename_total","text":"

    Total number of Rename procedure requests. It is the total number of Rename success and Rename error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_renew_avg_latency","title":"svm_nfs_renew_avg_latency","text":"

    Average latency of RENEW procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 renew.average_latencyUnit: microsecType: averageBase: renew.total conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 renew_avg_latencyUnit: microsecType: average,no-zero-valuesBase: renew_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_renew_total","title":"svm_nfs_renew_total","text":"

    Total number of RENEW procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 renew.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 renew_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_restorefh_avg_latency","title":"svm_nfs_restorefh_avg_latency","text":"

    Average latency of RESTOREFH procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 restorefh.average_latencyUnit: microsecType: averageBase: restorefh.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 restorefh.average_latencyUnit: microsecType: averageBase: restorefh.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 restorefh.average_latencyUnit: microsecType: averageBase: restorefh.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 restorefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: restorefh_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 restorefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: restorefh_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 restorefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: restorefh_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_restorefh_total","title":"svm_nfs_restorefh_total","text":"

    Total number of RESTOREFH procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 restorefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 restorefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 restorefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 restorefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 restorefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 restorefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_rmdir_avg_latency","title":"svm_nfs_rmdir_avg_latency","text":"

    Average latency of RmDir procedure requests. The counter keeps track of the average response time of RmDir requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 rmdir.average_latencyUnit: microsecType: averageBase: rmdir.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 rmdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rmdir_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_rmdir_total","title":"svm_nfs_rmdir_total","text":"

    Total number of RmDir procedure requests. It is the total number of RmDir success and RmDir error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 rmdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 rmdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_savefh_avg_latency","title":"svm_nfs_savefh_avg_latency","text":"

    Average latency of SAVEFH procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 savefh.average_latencyUnit: microsecType: averageBase: savefh.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 savefh.average_latencyUnit: microsecType: averageBase: savefh.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 savefh.average_latencyUnit: microsecType: averageBase: savefh.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 savefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: savefh_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 savefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: savefh_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 savefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: savefh_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_savefh_total","title":"svm_nfs_savefh_total","text":"

    Total number of SAVEFH procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 savefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 savefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 savefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 savefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 savefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 savefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_secinfo_avg_latency","title":"svm_nfs_secinfo_avg_latency","text":"

    Average latency of SECINFO procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 secinfo.average_latencyUnit: microsecType: averageBase: secinfo.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 secinfo.average_latencyUnit: microsecType: averageBase: secinfo.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 secinfo.average_latencyUnit: microsecType: averageBase: secinfo.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 secinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 secinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 secinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_secinfo_no_name_avg_latency","title":"svm_nfs_secinfo_no_name_avg_latency","text":"

    Average latency of SECINFO_NO_NAME operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 secinfo_no_name.average_latencyUnit: microsecType: averageBase: secinfo_no_name.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 secinfo_no_name.average_latencyUnit: microsecType: averageBase: secinfo_no_name.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 secinfo_no_name_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_no_name_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 secinfo_no_name_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_no_name_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_secinfo_no_name_total","title":"svm_nfs_secinfo_no_name_total","text":"

    Total number of SECINFO_NO_NAME operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 secinfo_no_name.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 secinfo_no_name.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 secinfo_no_name_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 secinfo_no_name_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_secinfo_total","title":"svm_nfs_secinfo_total","text":"

    Total number of SECINFO procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 secinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 secinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 secinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 secinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 secinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 secinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_sequence_avg_latency","title":"svm_nfs_sequence_avg_latency","text":"

    Average latency of SEQUENCE operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 sequence.average_latencyUnit: microsecType: averageBase: sequence.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 sequence.average_latencyUnit: microsecType: averageBase: sequence.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 sequence_avg_latencyUnit: microsecType: average,no-zero-valuesBase: sequence_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 sequence_avg_latencyUnit: microsecType: average,no-zero-valuesBase: sequence_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_sequence_total","title":"svm_nfs_sequence_total","text":"

    Total number of SEQUENCE operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 sequence.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 sequence.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 sequence_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 sequence_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_set_ssv_avg_latency","title":"svm_nfs_set_ssv_avg_latency","text":"

    Average latency of SET_SSV operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 set_ssv.average_latencyUnit: microsecType: averageBase: set_ssv.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 set_ssv.average_latencyUnit: microsecType: averageBase: set_ssv.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 set_ssv_avg_latencyUnit: microsecType: average,no-zero-valuesBase: set_ssv_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 set_ssv_avg_latencyUnit: microsecType: average,no-zero-valuesBase: set_ssv_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_set_ssv_total","title":"svm_nfs_set_ssv_total","text":"

    Total number of SET_SSV operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 set_ssv.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 set_ssv.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 set_ssv_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 set_ssv_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_setattr_avg_latency","title":"svm_nfs_setattr_avg_latency","text":"

    Average latency of SetAttr procedure requests. The counter keeps track of the average response time of SetAttr requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_setattr_total","title":"svm_nfs_setattr_total","text":"

    Total number of SetAttr procedure requests. It is the total number of SetAttr success and SetAttr error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_setclientid_avg_latency","title":"svm_nfs_setclientid_avg_latency","text":"

    Average latency of SETCLIENTID procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 setclientid.average_latencyUnit: microsecType: averageBase: setclientid.total conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 setclientid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setclientid_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_setclientid_confirm_avg_latency","title":"svm_nfs_setclientid_confirm_avg_latency","text":"

    Average latency of SETCLIENTID_CONFIRM procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 setclientid_confirm.average_latencyUnit: microsecType: averageBase: setclientid_confirm.total conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 setclientid_confirm_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setclientid_confirm_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_setclientid_confirm_total","title":"svm_nfs_setclientid_confirm_total","text":"

    Total number of SETCLIENTID_CONFIRM procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 setclientid_confirm.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 setclientid_confirm_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_setclientid_total","title":"svm_nfs_setclientid_total","text":"

    Total number of SETCLIENTID procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 setclientid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 setclientid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_symlink_avg_latency","title":"svm_nfs_symlink_avg_latency","text":"

    Average latency of SymLink procedure requests. The counter keeps track of the average response time of SymLink requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 symlink.average_latencyUnit: microsecType: averageBase: symlink.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 symlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: symlink_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_symlink_total","title":"svm_nfs_symlink_total","text":"

    Total number of SymLink procedure requests. It is the total number of SymLink success and SymLink error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 symlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 symlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_test_stateid_avg_latency","title":"svm_nfs_test_stateid_avg_latency","text":"

    Average latency of TEST_STATEID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 test_stateid.average_latencyUnit: microsecType: averageBase: test_stateid.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 test_stateid.average_latencyUnit: microsecType: averageBase: test_stateid.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 test_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: test_stateid_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 test_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: test_stateid_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_test_stateid_total","title":"svm_nfs_test_stateid_total","text":"

    Total number of TEST_STATEID operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 test_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 test_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 test_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 test_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_throughput","title":"svm_nfs_throughput","text":"

    Rate of NFSv3 data transfers per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 total.throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 total.write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 total.write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 nfsv3_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 nfs4_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 nfs41_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 nfs42_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_verify_avg_latency","title":"svm_nfs_verify_avg_latency","text":"

    Average latency of VERIFY procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 verify.average_latencyUnit: microsecType: averageBase: verify.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 verify.average_latencyUnit: microsecType: averageBase: verify.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 verify.average_latencyUnit: microsecType: averageBase: verify.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 verify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: verify_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 verify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: verify_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 verify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: verify_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_verify_total","title":"svm_nfs_verify_total","text":"

    Total number of VERIFY procedures

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 verify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 verify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 verify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 verify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 verify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 verify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_want_delegation_avg_latency","title":"svm_nfs_want_delegation_avg_latency","text":"

    Average latency of WANT_DELEGATION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 want_delegation.average_latencyUnit: microsecType: averageBase: want_delegation.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 want_delegation.average_latencyUnit: microsecType: averageBase: want_delegation.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 want_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: want_delegation_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 want_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: want_delegation_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_want_delegation_total","title":"svm_nfs_want_delegation_total","text":"

    Total number of WANT_DELEGATION operations.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 want_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 want_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 want_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 want_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_write_avg_latency","title":"svm_nfs_write_avg_latency","text":"

    Average latency of Write procedure requests. The counter keeps track of the average response time of Write requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_write_ops","title":"svm_nfs_write_ops","text":"

    Total observed NFSv3 write operations per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 nfsv3_write_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_write_throughput","title":"svm_nfs_write_throughput","text":"

    Rate of NFSv3 write data transfers per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 total.write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 total.throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 total.throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 nfsv3_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 nfs4_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 nfs41_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 nfs42_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_write_total","title":"svm_nfs_write_total","text":"

    Total number of Write procedure requests. It is the total number of write success and write error requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_vol_avg_latency","title":"svm_vol_avg_latency","text":"

    Average latency in microseconds for the WAFL filesystem to process all the operations on the volume; not including request processing or network communication time

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm average_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver avg_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_other_latency","title":"svm_vol_other_latency","text":"
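    The row above shows one exported metric fed by two differently named source counters: average_latency from the REST volume:svm table and avg_latency from the ZAPI volume:vserver object. The toy sketch below illustrates that kind of name normalization; it is restricted to this single row and does not represent Harvest's actual template engine.

    ```python
    # Minimal sketch (not Harvest's template mechanism) of mapping the two source
    # counter names listed above to the single exported metric name. The dictionary
    # is limited to the svm_vol_avg_latency row and is purely illustrative.

    SOURCE_TO_EXPORTED = {
        ("REST", "volume:svm", "average_latency"): "svm_vol_avg_latency",
        ("ZAPI", "volume:vserver", "avg_latency"): "svm_vol_avg_latency",
    }

    def exported_name(api, obj, counter):
        """Map an (API, object, counter) triple to the exported metric name, if known."""
        return SOURCE_TO_EXPORTED.get((api, obj, counter))

    print(exported_name("ZAPI", "volume:vserver", "avg_latency"))  # svm_vol_avg_latency
    ```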

    Average latency in microseconds for the WAFL filesystem to process other operations to the volume; not including request processing or network communication time

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm other_latencyUnit: microsecType: averageBase: total_other_ops conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_other_ops","title":"svm_vol_other_ops","text":"

    Number of other operations per second to the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm total_other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_read_data","title":"svm_vol_read_data","text":"

    Bytes read per second

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm bytes_readUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_read_latency","title":"svm_vol_read_latency","text":"

    Average latency in microseconds for the WAFL filesystem to process read requests to the volume; not including request processing or network communication time

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm read_latencyUnit: microsecType: averageBase: total_read_ops conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_read_ops","title":"svm_vol_read_ops","text":"

    Number of read operations per second from the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm total_read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_total_ops","title":"svm_vol_total_ops","text":"

    Number of operations per second serviced by the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_write_data","title":"svm_vol_write_data","text":"

    Bytes written per second

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm bytes_writtenUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_write_latency","title":"svm_vol_write_latency","text":"

    Average latency in microseconds for the WAFL filesystem to process write requests to the volume; not including request processing or network communication time

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm write_latencyUnit: microsecType: averageBase: total_write_ops conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_write_ops","title":"svm_vol_write_ops","text":"

    Number of write operations per second to the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm total_write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vscan_connections_active","title":"svm_vscan_connections_active","text":"

    Total number of current active connections

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_vscan connections_activeUnit: noneType: rawBase: conf/restperf/9.13.0/vscan_svm.yaml ZAPI perf-object-get-instances offbox_vscan connections_activeUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/vscan_svm.yaml"},{"location":"ontap-metrics/#svm_vscan_dispatch_latency","title":"svm_vscan_dispatch_latency","text":"

    Average dispatch latency

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_vscan dispatch.latencyUnit: microsecType: averageBase: dispatch.requests conf/restperf/9.13.0/vscan_svm.yaml ZAPI perf-object-get-instances offbox_vscan dispatch_latencyUnit: microsecType: averageBase: dispatch_latency_base conf/zapiperf/cdot/9.8.0/vscan_svm.yaml"},{"location":"ontap-metrics/#svm_vscan_scan_latency","title":"svm_vscan_scan_latency","text":"

    Average scan latency

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_vscan scan.latencyUnit: microsecType: averageBase: scan.requests conf/restperf/9.13.0/vscan_svm.yaml ZAPI perf-object-get-instances offbox_vscan scan_latencyUnit: microsecType: averageBase: scan_latency_base conf/zapiperf/cdot/9.8.0/vscan_svm.yaml"},{"location":"ontap-metrics/#svm_vscan_scan_noti_received_rate","title":"svm_vscan_scan_noti_received_rate","text":"

    Total number of scan notifications received by the dispatcher per second

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_vscan scan.notification_received_rateUnit: per_secType: rateBase: conf/restperf/9.13.0/vscan_svm.yaml ZAPI perf-object-get-instances offbox_vscan scan_noti_received_rateUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/vscan_svm.yaml"},{"location":"ontap-metrics/#svm_vscan_scan_request_dispatched_rate","title":"svm_vscan_scan_request_dispatched_rate","text":"

    Total number of scan requests sent to the Vscanner per second

    API Endpoint Metric Template REST api/cluster/counter/tables/svm_vscan scan.request_dispatched_rateUnit: per_secType: rateBase: conf/restperf/9.13.0/vscan_svm.yaml ZAPI perf-object-get-instances offbox_vscan scan_request_dispatched_rateUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/vscan_svm.yaml"},{"location":"ontap-metrics/#token_copy_bytes","title":"token_copy_bytes","text":"

    Total number of bytes copied.

    API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_copy.bytesUnit: noneType: rateBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_copy_bytesUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_copy_failure","title":"token_copy_failure","text":"

    Number of failed token copy requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_copy.failuresUnit: noneType: deltaBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_copy_failureUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_copy_success","title":"token_copy_success","text":"

    Number of successful token copy requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_copy.successesUnit: noneType: deltaBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_copy_successUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_create_bytes","title":"token_create_bytes","text":"

    Total number of bytes for which tokens are created.

    API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_create.bytesUnit: noneType: rateBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_create_bytesUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_create_failure","title":"token_create_failure","text":"

    Number of failed token create requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_create.failuresUnit: noneType: deltaBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_create_failureUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_create_success","title":"token_create_success","text":"

    Number of successful token create requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_create.successesUnit: noneType: deltaBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_create_successUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_zero_bytes","title":"token_zero_bytes","text":"

    Total number of bytes zeroed.

    API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_zero.bytesUnit: noneType: rateBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_zero_bytesUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_zero_failure","title":"token_zero_failure","text":"

    Number of failed token zero requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_zero.failuresUnit: noneType: deltaBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_zero_failureUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_zero_success","title":"token_zero_success","text":"

    Number of successful token zero requests.

    API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_zero.successesUnit: noneType: deltaBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_zero_successUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#volume_autosize_grow_threshold_percent","title":"volume_autosize_grow_threshold_percent","text":"

    Used space threshold which triggers autogrow. When the size-used is greater than this percent of size-total, the volume will be grown. The computed value is rounded down. The default value of this element varies from 85% to 98%, depending on the volume size. It is an error for the grow threshold to be less than or equal to the shrink threshold.

    API Endpoint Metric Template REST api/private/cli/volume autosize_grow_threshold_percent conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-autosize-attributes.grow-threshold-percent conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_autosize_maximum_size","title":"volume_autosize_maximum_size","text":"

    The maximum size (in bytes) to which the volume would be grown automatically. The default value is 20% greater than the volume size. It is an error for the maximum volume size to be less than the current volume size. It is also an error for the maximum size to be less than or equal to the minimum size.

    API Endpoint Metric Template REST api/private/cli/volume max_autosize conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-autosize-attributes.maximum-size conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_avg_latency","title":"volume_avg_latency","text":"

    Average latency in microseconds for the WAFL filesystem to process all the operations on the volume; not including request processing or network communication time

    API Endpoint Metric Template REST api/cluster/counter/tables/volume average_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume avg_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_capacity_tier_footprint","title":"volume_capacity_tier_footprint","text":"API Endpoint Metric Template REST api/private/cli/volume/footprint volume_blocks_footprint_bin1 conf/rest/9.14.0/volume.yaml"},{"location":"ontap-metrics/#volume_capacity_tier_footprint_percent","title":"volume_capacity_tier_footprint_percent","text":"API Endpoint Metric Template REST api/private/cli/volume/footprint volume_blocks_footprint_bin1_percent conf/rest/9.14.0/volume.yaml"},{"location":"ontap-metrics/#volume_filesystem_size","title":"volume_filesystem_size","text":"

    Filesystem size (in bytes) of the volume. This is the total usable size of the volume, not including WAFL reserve. This value is the same as Size except for certain SnapMirror destination volumes. It is possible for destination volumes to have a different filesystem-size because the filesystem-size is sent across from the source volume. This field is valid only when the volume is online.

    API Endpoint Metric Template REST api/private/cli/volume filesystem_size conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.filesystem-size conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_inode_files_total","title":"volume_inode_files_total","text":"

    Total user-visible file (inode) count, i.e., current maximum number of user-visible files (inodes) that this volume can currently hold.

    API Endpoint Metric Template REST api/private/cli/volume files conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-inode-attributes.files-total conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_inode_files_used","title":"volume_inode_files_used","text":"

    Number of user-visible files (inodes) used. This field is valid only when the volume is online.

    API Endpoint Metric Template REST api/private/cli/volume files_used conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-inode-attributes.files-used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_inode_used_percent","title":"volume_inode_used_percent","text":"

    volume_inode_files_used / volume_inode_files_total

    API Endpoint Metric Template REST api/private/cli/volume inode_files_used, inode_files_total conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter inode_files_used, inode_files_total conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_access_latency","title":"volume_nfs_access_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol access requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.access_latencyUnit: microsecType: averageBase: nfs.access_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_access_latencyUnit: microsecType: averageBase: nfs_access_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_access_ops","title":"volume_nfs_access_ops","text":"

    Number of NFS accesses per second to the volume.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.access_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_access_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_getattr_latency","title":"volume_nfs_getattr_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol getattr requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.getattr_latencyUnit: microsecType: averageBase: nfs.getattr_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_getattr_latencyUnit: microsecType: averageBase: nfs_getattr_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_getattr_ops","title":"volume_nfs_getattr_ops","text":"

    Number of NFS getattr per second to the volume.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.getattr_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_getattr_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_lookup_latency","title":"volume_nfs_lookup_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol lookup requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.lookup_latencyUnit: microsecType: averageBase: nfs.lookup_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_lookup_latencyUnit: microsecType: averageBase: nfs_lookup_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_lookup_ops","title":"volume_nfs_lookup_ops","text":"

    Number of NFS lookups per second to the volume.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.lookup_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_lookup_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_other_latency","title":"volume_nfs_other_latency","text":"

    Average time for the WAFL filesystem to process other NFS operations to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.other_latencyUnit: microsecType: averageBase: nfs.other_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_other_latencyUnit: microsecType: averageBase: nfs_other_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_other_ops","title":"volume_nfs_other_ops","text":"

    Number of other NFS operations per second to the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_punch_hole_latency","title":"volume_nfs_punch_hole_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol hole-punch requests to the volume.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.punch_hole_latencyUnit: microsecType: averageBase: nfs.punch_hole_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_punch_hole_latencyUnit: microsecType: averageBase: nfs_punch_hole_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_punch_hole_ops","title":"volume_nfs_punch_hole_ops","text":"

    Number of NFS hole-punch requests per second to the volume.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.punch_hole_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_punch_hole_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_read_latency","title":"volume_nfs_read_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol read requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.read_latencyUnit: microsecType: averageBase: nfs.read_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_read_latencyUnit: microsecType: averageBase: nfs_read_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_read_ops","title":"volume_nfs_read_ops","text":"

    Number of NFS read operations per second from the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_setattr_latency","title":"volume_nfs_setattr_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol setattr requests to the volume.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.setattr_latencyUnit: microsecType: averageBase: nfs.setattr_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_setattr_latencyUnit: microsecType: averageBase: nfs_setattr_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_setattr_ops","title":"volume_nfs_setattr_ops","text":"

    Number of NFS setattr requests per second to the volume.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.setattr_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_setattr_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_total_ops","title":"volume_nfs_total_ops","text":"

    Number of total NFS operations per second to the volume.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_write_latency","title":"volume_nfs_write_latency","text":"

    Average time for the WAFL filesystem to process NFS protocol write requests to the volume; not including NFS protocol request processing or network communication time, which will also be included in client observed NFS request latency

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.write_latencyUnit: microsecType: averageBase: nfs.write_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_write_latencyUnit: microsecType: averageBase: nfs_write_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_write_ops","title":"volume_nfs_write_ops","text":"

    Number of NFS write operations per second to the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_num_compress_attempts","title":"volume_num_compress_attempts","text":"API Endpoint Metric Template REST api/private/cli/volume/efficiency/stat num_compress_attempts conf/rest/9.14.0/volume.yaml"},{"location":"ontap-metrics/#volume_num_compress_fail","title":"volume_num_compress_fail","text":"API Endpoint Metric Template REST api/private/cli/volume/efficiency/stat num_compress_fail conf/rest/9.14.0/volume.yaml"},{"location":"ontap-metrics/#volume_other_latency","title":"volume_other_latency","text":"

    Average latency in microseconds for the WAFL filesystem to process other operations to the volume; not including request processing or network communication time

    API Endpoint Metric Template REST api/cluster/counter/tables/volume other_latencyUnit: microsecType: averageBase: total_other_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_other_ops","title":"volume_other_ops","text":"

    Number of other operations per second to the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume total_other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_overwrite_reserve_available","title":"volume_overwrite_reserve_available","text":"

    Amount of storage space that is currently available for overwrites, calculated by subtracting the amount that has already been used from the total overwrite reserve space.

    API Endpoint Metric Template REST api/private/cli/volume overwrite_reserve_total, overwrite_reserve_used conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter overwrite_reserve_total, overwrite_reserve_used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_overwrite_reserve_total","title":"volume_overwrite_reserve_total","text":"

    The size (in bytes) that is reserved for overwriting snapshotted data in an otherwise full volume. This space is usable only by space-reserved LUNs and files, and then only when the volume is full. This field is valid only when the volume is online.

    API Endpoint Metric Template REST api/private/cli/volume overwrite_reserve conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.overwrite-reserve conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_overwrite_reserve_used","title":"volume_overwrite_reserve_used","text":"

    The reserved size (in bytes) that is not available for new overwrites. The number includes both the reserved size which has actually been used for overwrites as well as the size which was never allocated in the first place. This field is valid only when the volume is online.

    API Endpoint Metric Template REST api/private/cli/volume overwrite_reserve_used conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.overwrite-reserve-used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_performance_tier_footprint","title":"volume_performance_tier_footprint","text":"API Endpoint Metric Template REST api/private/cli/volume/footprint volume_blocks_footprint_bin0 conf/rest/9.14.0/volume.yaml"},{"location":"ontap-metrics/#volume_performance_tier_footprint_percent","title":"volume_performance_tier_footprint_percent","text":"API Endpoint Metric Template REST api/private/cli/volume/footprint volume_blocks_footprint_bin0_percent conf/rest/9.14.0/volume.yaml"},{"location":"ontap-metrics/#volume_read_data","title":"volume_read_data","text":"

    Bytes read per second

    API Endpoint Metric Template REST api/cluster/counter/tables/volume bytes_readUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_read_latency","title":"volume_read_latency","text":"

    Average latency in microseconds for the WAFL filesystem to process read requests to the volume; not including request processing or network communication time

    API Endpoint Metric Template REST api/cluster/counter/tables/volume read_latencyUnit: microsecType: averageBase: total_read_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_read_ops","title":"volume_read_ops","text":"

    Number of read operations per second from the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume total_read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_sis_compress_saved","title":"volume_sis_compress_saved","text":"

    The total disk space (in bytes) that is saved by compressing blocks on the referenced file system.

    API Endpoint Metric Template REST api/private/cli/volume compression_space_saved conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-sis-attributes.compression-space-saved conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_sis_compress_saved_percent","title":"volume_sis_compress_saved_percent","text":"

    Percentage of the total disk space that is saved by compressing blocks on the referenced file system

    API Endpoint Metric Template REST api/private/cli/volume compression_space_saved_percent conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-sis-attributes.percentage-compression-space-saved conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_sis_dedup_saved","title":"volume_sis_dedup_saved","text":"

    The total disk space (in bytes) that is saved by deduplication and file cloning.

    API Endpoint Metric Template REST api/private/cli/volume dedupe_space_saved conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-sis-attributes.deduplication-space-saved conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_sis_dedup_saved_percent","title":"volume_sis_dedup_saved_percent","text":"

    Percentage of the total disk space that is saved by deduplication and file cloning.

    API Endpoint Metric Template REST api/private/cli/volume dedupe_space_saved_percent conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-sis-attributes.percentage-deduplication-space-saved conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_sis_total_saved","title":"volume_sis_total_saved","text":"

    Total space saved (in bytes) in the volume due to deduplication, compression, and file cloning.

    API Endpoint Metric Template REST api/private/cli/volume sis_space_saved conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-sis-attributes.total-space-saved conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_sis_total_saved_percent","title":"volume_sis_total_saved_percent","text":"

    Percentage of total disk space that is saved by compressing blocks, deduplication and file cloning.

    API Endpoint Metric Template REST api/private/cli/volume sis_space_saved_percent conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-sis-attributes.percentage-total-space-saved conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_size","title":"volume_size","text":"

    Physical size of the volume, in bytes. The minimum size for a FlexVol volume is 20MB and the minimum size for a FlexGroup volume is 200MB per constituent. The recommended size for a FlexGroup volume is a minimum of 100GB per constituent. For all volumes, the default size is equal to the minimum size.

    API Endpoint Metric Template REST api/private/cli/volume size conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.size conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_size_available","title":"volume_size_available","text":"

    The size (in bytes) that is still available in the volume. This field is valid only when the volume is online.

    API Endpoint Metric Template REST api/private/cli/volume available conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.size-available conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_size_total","title":"volume_size_total","text":"

    Total usable size (in bytes) of the volume, not including WAFL reserve or volume snapshot reserve. If the volume is restricted or offline, a value of 0 is returned.

    API Endpoint Metric Template REST api/private/cli/volume total conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.size-total conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_size_used","title":"volume_size_used","text":"

    Number of bytes used in the volume. If the volume is restricted or offline, a value of 0 is returned.

    API Endpoint Metric Template REST api/private/cli/volume used conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.size-used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_size_used_percent","title":"volume_size_used_percent","text":"

    Percentage of utilized storage space in a volume relative to its total capacity.

    API Endpoint Metric Template REST api/private/cli/volume percent_used conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.percentage-size-used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshot_count","title":"volume_snapshot_count","text":"

    Number of Snapshot copies in the volume.

    API Endpoint Metric Template REST api/private/cli/volume snapshot_count conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-snapshot-attributes.snapshot-count conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshot_reserve_available","title":"volume_snapshot_reserve_available","text":"

    The size (in bytes) that is available for Snapshot copies inside the Snapshot reserve. This value is zero if Snapshot spill is present. For 'none' guaranteed volumes, this may get reduced due to less available space in the aggregate. This parameter is not supported on Infinite Volumes.

    API Endpoint Metric Template REST api/private/cli/volume snapshot_reserve_available conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.snapshot-reserve-available conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshot_reserve_percent","title":"volume_snapshot_reserve_percent","text":"

    The percentage of volume disk space that has been set aside as reserve for snapshot usage.

    API Endpoint Metric Template REST api/private/cli/volume percent_snapshot_space conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.percentage-snapshot-reserve conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshot_reserve_size","title":"volume_snapshot_reserve_size","text":"

    The size (in bytes) in the volume that has been set aside as reserve for snapshot usage.

    API Endpoint Metric Template REST api/private/cli/volume snapshot_reserve_size conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.snapshot-reserve-size conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshot_reserve_used","title":"volume_snapshot_reserve_used","text":"

    Amount of storage space currently used by a volume's snapshot reserve, which is calculated by subtracting the snapshot reserve available space from the snapshot reserve size.

    API Endpoint Metric Template REST api/private/cli/volume snapshot_reserve_size, snapshot_reserve_available conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter snapshot_reserve_size, snapshot_reserve_available conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshot_reserve_used_percent","title":"volume_snapshot_reserve_used_percent","text":"

    Percentage of the volume reserved for snapshots that has been used. Note that in some scenarios, it is possible to exceed 100% of the space allocated.

    API Endpoint Metric Template REST api/private/cli/volume snapshot_space_used conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.percentage-snapshot-reserve-used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshots_size_available","title":"volume_snapshots_size_available","text":"

    Total free space (in bytes) available in the volume and the snapshot reserve. If this value is 0 or negative, a new snapshot cannot be created.

    API Endpoint Metric Template REST api/private/cli/volume size_available_for_snapshots conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.size-available-for-snapshots conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshots_size_used","title":"volume_snapshots_size_used","text":"

    The size (in bytes) that is used by snapshots in the volume.

    API Endpoint Metric Template REST api/private/cli/volume size_used_by_snapshots conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.size-used-by-snapshots conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_expected_available","title":"volume_space_expected_available","text":"

    The size (in bytes) that should be available for the volume irrespective of available size in the aggregate. This is the same as size-available for 'volume' guaranteed volumes. For 'none' guaranteed volumes this value is calculated as if the aggregate has enough backing disk space to fully support the volume's size. Similar to the size-available property, this does not include Snapshot reserve. This count gets reduced if snapshots consume space above the Snapshot reserve threshold. This parameter is not supported on Infinite Volumes.

    API Endpoint Metric Template REST api/private/cli/volume expected_available conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.expected-available conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_logical_available","title":"volume_space_logical_available","text":"

    The size (in bytes) that is logically available in the volume. This is the amount of free space available considering space saved by the storage efficiency features as being used. This does not include Snapshot reserve. This parameter is not supported on FlexGroups or Infinite Volumes.

    API Endpoint Metric Template REST api/private/cli/volume logical_available conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.logical-available conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_logical_used","title":"volume_space_logical_used","text":"

    The size (in bytes) that is logically used in the volume. This value includes all the space saved by the storage efficiency features along with the physically used space. This does not include Snapshot reserve but does consider Snapshot spill. This parameter is not supported on FlexGroups or Infinite Volumes.

    API Endpoint Metric Template REST api/private/cli/volume logical_used conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.logical-used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_logical_used_by_afs","title":"volume_space_logical_used_by_afs","text":"

    The size (in bytes) that is logically used by the active filesystem of the volume. This value differs from 'logical-used' by the amount of Snapshot spill that exceeds Snapshot reserve. This parameter is not supported on FlexGroups or Infinite Volumes.

    API Endpoint Metric Template REST api/private/cli/volume logical_used_by_afs conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.logical-used-by-afs conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_logical_used_by_snapshots","title":"volume_space_logical_used_by_snapshots","text":"

    The size (in bytes) that is logically used across all Snapshot copies in the volume. This value differs from 'size-used-by-snapshots' by the space saved by the storage efficiency features across the Snapshot copies. This parameter is not supported on FlexGroups or Infinite Volumes.

    API Endpoint Metric Template REST api/private/cli/volume logical_used_by_snapshots conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.logical-used-by-snapshots conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_logical_used_percent","title":"volume_space_logical_used_percent","text":"

    Percentage of the logical used size of the volume. This parameter is not supported on FlexGroups or Infinite Volumes.

    API Endpoint Metric Template REST api/private/cli/volume logical_used_percent conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.logical-used-percent conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_performance_tier_inactive_user_data","title":"volume_space_performance_tier_inactive_user_data","text":"

    The size that is physically used in the performance tier of the volume and has a cold temperature. This parameter is only supported if the volume is in an aggregate that is either attached to an object store or could be attached to an object store.

    API Endpoint Metric Template REST api/private/cli/volume performance_tier_inactive_user_data conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.performance-tier-inactive-user-data conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_performance_tier_inactive_user_data_percent","title":"volume_space_performance_tier_inactive_user_data_percent","text":"

    The size (in percent) that is physically used in the performance tier of the volume and has a cold temperature. This parameter is only supported if the volume is in an aggregate that is either attached to an object store or could be attached to an object store.

    API Endpoint Metric Template REST api/private/cli/volume performance_tier_inactive_user_data_percent conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.performance-tier-inactive-user-data-percent conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_physical_used","title":"volume_space_physical_used","text":"

    The size (in bytes) that is physically used in the volume. This differs from 'total-used' space by the space that is reserved for future writes. The value includes blocks in use by Snapshot copies. This field is valid only if the volume is online.

    API Endpoint Metric Template REST api/private/cli/volume virtual_used conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.physical-used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_physical_used_percent","title":"volume_space_physical_used_percent","text":"

    The size (in percent) that is physically used in the volume. The percentage is based on volume size including the space reserved for Snapshot copies. This field is valid only if the volume is online.

    API Endpoint Metric Template REST api/private/cli/volume virtual_used_percent conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.physical-used-percent conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_total_data","title":"volume_total_data","text":"

    This metric represents the total amount of data that has been read from and written to a specific volume.

    API Endpoint Metric Template REST api/cluster/counter/tables/volume bytes_read, bytes_writtenUnit: Type: Base: conf/restperf/9.12.0/volume.yaml ZAPI volume read_data, write_dataUnit: Type: Base: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_total_ops","title":"volume_total_ops","text":"

    Number of operations per second serviced by the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_write_data","title":"volume_write_data","text":"

    Bytes written per second

    API Endpoint Metric Template REST api/cluster/counter/tables/volume bytes_writtenUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_write_latency","title":"volume_write_latency","text":"

    Average latency in microseconds for the WAFL filesystem to process write requests to the volume; not including request processing or network communication time

    API Endpoint Metric Template REST api/cluster/counter/tables/volume write_latencyUnit: microsecType: averageBase: total_write_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_write_ops","title":"volume_write_ops","text":"

    Number of write operations per second to the volume

    API Endpoint Metric Template REST api/cluster/counter/tables/volume total_write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#vscan_scan_latency","title":"vscan_scan_latency","text":"

    Average scan latency

    API Endpoint Metric Template REST api/cluster/counter/tables/vscan scan.latencyUnit: microsecType: averageBase: scan.requests conf/restperf/9.13.0/vscan.yaml ZAPI perf-object-get-instances offbox_vscan_server scan_latencyUnit: microsecType: averageBase: scan_latency_base conf/zapiperf/cdot/9.8.0/vscan.yaml"},{"location":"ontap-metrics/#vscan_scan_request_dispatched_rate","title":"vscan_scan_request_dispatched_rate","text":"

    Total number of scan requests sent to the scanner per second

    API Endpoint Metric Template REST api/cluster/counter/tables/vscan scan.request_dispatched_rateUnit: per_secType: rateBase: conf/restperf/9.13.0/vscan.yaml ZAPI perf-object-get-instances offbox_vscan_server scan_request_dispatched_rateUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/vscan.yaml"},{"location":"ontap-metrics/#vscan_scanner_stats_pct_cpu_used","title":"vscan_scanner_stats_pct_cpu_used","text":"

    Percentage CPU utilization on scanner calculated over the last 15 seconds.

    API Endpoint Metric Template REST api/cluster/counter/tables/vscan scanner.stats_percent_cpu_usedUnit: noneType: rawBase: conf/restperf/9.13.0/vscan.yaml ZAPI perf-object-get-instances offbox_vscan_server scanner_stats_pct_cpu_usedUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/vscan.yaml"},{"location":"ontap-metrics/#vscan_scanner_stats_pct_mem_used","title":"vscan_scanner_stats_pct_mem_used","text":"

    Percentage RAM utilization on scanner calculated over the last 15 seconds.

    API Endpoint Metric Template REST api/cluster/counter/tables/vscan scanner.stats_percent_mem_usedUnit: noneType: rawBase: conf/restperf/9.13.0/vscan.yaml ZAPI perf-object-get-instances offbox_vscan_server scanner_stats_pct_mem_usedUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/vscan.yaml"},{"location":"ontap-metrics/#vscan_scanner_stats_pct_network_used","title":"vscan_scanner_stats_pct_network_used","text":"

    Percentage network utilization on scanner calculated for the last 15 seconds.

    API Endpoint Metric Template REST api/cluster/counter/tables/vscan scanner.stats_percent_network_usedUnit: noneType: rawBase: conf/restperf/9.13.0/vscan.yaml ZAPI perf-object-get-instances offbox_vscan_server scanner_stats_pct_network_usedUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/vscan.yaml"},{"location":"ontap-metrics/#wafl_avg_msg_latency","title":"wafl_avg_msg_latency","text":"

    Average turnaround time for WAFL messages in milliseconds.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl average_msg_latencyUnit: millisecType: averageBase: msg_total conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl avg_wafl_msg_latencyUnit: millisecType: averageBase: wafl_msg_total conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_avg_non_wafl_msg_latency","title":"wafl_avg_non_wafl_msg_latency","text":"

    Average turnaround time for non-WAFL messages in milliseconds.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl average_non_wafl_msg_latencyUnit: millisecType: averageBase: non_wafl_msg_total conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl avg_non_wafl_msg_latencyUnit: millisecType: averageBase: non_wafl_msg_total conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_avg_repl_msg_latency","title":"wafl_avg_repl_msg_latency","text":"

    Average turnaround time for replication WAFL messages in milliseconds.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl average_replication_msg_latencyUnit: millisecType: averageBase: replication_msg_total conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl avg_wafl_repl_msg_latencyUnit: millisecType: averageBase: wafl_repl_msg_total conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_cp_count","title":"wafl_cp_count","text":"

    Array of counts of different types of Consistency Points (CP).

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl cp_countUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl cp_countUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_cp_phase_times","title":"wafl_cp_phase_times","text":"

    Array of percentage time spent in different phases of Consistency Point (CP).

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl cp_phase_timesUnit: percentType: percentBase: total_cp_msecs conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl cp_phase_timesUnit: percentType: percentBase: total_cp_msecs conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_memory_free","title":"wafl_memory_free","text":"

    The current WAFL memory available in the system.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl memory_freeUnit: mbType: rawBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_memory_freeUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_memory_used","title":"wafl_memory_used","text":"

    The current WAFL memory used in the system.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl memory_usedUnit: mbType: rawBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_memory_usedUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_msg_total","title":"wafl_msg_total","text":"

    Total number of WAFL messages per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl msg_totalUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_msg_totalUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_non_wafl_msg_total","title":"wafl_non_wafl_msg_total","text":"

    Total number of non-WAFL messages per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl non_wafl_msg_totalUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl non_wafl_msg_totalUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_read_io_type","title":"wafl_read_io_type","text":"

    Percentage of reads served from buffer cache, external cache, or disk.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl read_io_typeUnit: percentType: percentBase: read_io_type_base conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl read_io_typeUnit: percentType: percentBase: read_io_type_base conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_cache","title":"wafl_reads_from_cache","text":"

    WAFL reads from cache.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl reads_from_cacheUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_reads_from_cacheUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_cloud","title":"wafl_reads_from_cloud","text":"

    WAFL reads from cloud storage.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl reads_from_cloudUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_reads_from_cloudUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_cloud_s2c_bin","title":"wafl_reads_from_cloud_s2c_bin","text":"

    WAFL reads from cloud storage via s2c bin.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl reads_from_cloud_s2c_binUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_reads_from_cloud_s2c_binUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_disk","title":"wafl_reads_from_disk","text":"

    WAFL reads from disk.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl reads_from_diskUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_reads_from_diskUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_ext_cache","title":"wafl_reads_from_ext_cache","text":"

    WAFL reads from external cache.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl reads_from_external_cacheUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_reads_from_ext_cacheUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_fc_miss","title":"wafl_reads_from_fc_miss","text":"

    WAFL reads from remote volume for fc_miss.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl reads_from_fc_missUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_reads_from_fc_missUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_pmem","title":"wafl_reads_from_pmem","text":"

    WAFL reads from persistent memory.

    API Endpoint Metric Template ZAPI perf-object-get-instances wafl wafl_reads_from_pmemUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_ssd","title":"wafl_reads_from_ssd","text":"

    WAFL reads from SSD.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl reads_from_ssdUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_reads_from_ssdUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_repl_msg_total","title":"wafl_repl_msg_total","text":"

    Total number of replication WAFL messages per second.

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl replication_msg_totalUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_repl_msg_totalUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_total_cp_msecs","title":"wafl_total_cp_msecs","text":"

    Milliseconds spent in Consistency Point (CP).

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl total_cp_msecsUnit: millisecType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl total_cp_msecsUnit: millisecType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_total_cp_util","title":"wafl_total_cp_util","text":"

    Percentage of time spent in a Consistency Point (CP).

    API Endpoint Metric Template REST api/cluster/counter/tables/wafl total_cp_utilUnit: percentType: percentBase: cpu_elapsed_time conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl total_cp_utilUnit: percentType: percentBase: cpu_elapsed_time conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"plugins/","title":"Plugins","text":""},{"location":"plugins/#built-in-plugins","title":"Built-in Plugins","text":"

    The plugin feature allows users to manipulate and customize data collected by collectors without changing the collectors. Plugins have the same capabilities as collectors and therefore can collect data on their own as well. Furthermore, multiple plugins can be put in a pipeline to perform more complex operations.
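
    For example, a collector template could chain a LabelAgent rule with an Aggregator rule in the same pipeline. The sketch below reuses the rule syntax documented later on this page; the label names are illustrative and the pipeline order is assumed to follow the listing order.

    plugins:\n  # sketch: LabelAgent rewrites instance labels first (assumed order),\n  # then Aggregator builds a per-node Matrix from the resulting instances\n  LabelAgent:\n    split: node `/` ,aggr,plex,disk\n  Aggregator:\n    - node\n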

    Harvest architecture defines two types of plugins:

    Built-in generic - Statically compiled, generic plugins. \"Generic\" means the plugin is collector-agnostic. These plugins are provided in this package and listed in the right sidebar.

    Built-in custom - These plugins are statically compiled, collector-specific plugins. Their source code should reside inside the plugins/ subdirectory of the collector package (e.g. [cmd/collectors/rest/plugins/svm/svm.go](https://github.com/NetApp/harvest/blob/main/cmd/collectors/rest/plugins/svm/svm.go)). Custom plugins have access to all the parameters of their parent collector and should therefore be treated with great care.

    This documentation gives an overview of built-in plugins. For other plugins, see their respective documentation. For writing your own plugin, see the Developer's documentation.

    Note: the rules are executed in the same order as you've added them.

    "},{"location":"plugins/#aggregator","title":"Aggregator","text":"

    Aggregator creates a new collection of metrics (Matrix) by summarizing and/or averaging metric values from an existing Matrix for a given label. For example, if the collected metrics are for volumes, you can create an aggregation for nodes or svms.
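
    For instance, to roll volume metrics up to both the node and the SVM level, a template could list both target labels. This is a sketch that assumes the collected instances carry node and svm labels.

    plugins:\n  Aggregator:\n    # one aggregated Matrix per target label (sketch)\n    - node\n    - svm\n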

    "},{"location":"plugins/#rule-syntax","title":"Rule syntax","text":"

    simplest case:

    plugins:\n  Aggregator:\n    - LABEL\n# will aggregate a new Matrix based on target label LABEL\n

    If you want to specify which labels should be included in the new instances, you can add those space-separated after LABEL:

        - LABEL LABEL1,LABEL2\n    # same, but LABEL1 and LABEL2 will be copied into the new instances\n    # (default is to only copy LABEL and any global labels (such as cluster and datacenter)\n

    Or include all labels:

        - LABEL ...\n    # copy all labels of the original instance\n

    By default, aggregated metrics will be prefixed with LABEL. For example, if the object of the original Matrix is volume (meaning metrics are prefixed with volume_) and LABEL is aggr, then the metric volume_read_ops will become aggr_volume_read_ops, etc. You can override this by providing a new object name with the <>OBJ syntax:

        - LABEL<>OBJ\n    # use OBJ as the object of the new matrix, e.g. if the original object is \"volume\" and you\n    # want to leave metric names unchanged, use \"volume\"\n

    Finally, sometimes you only want to aggregate instances with a specific label value. You can use <VALUE> for that (optionally followed by OBJ):

        - LABEL<VALUE>\n    # aggregate all instances if LABEL has value VALUE\n    - LABEL<`VALUE`>\n    # same, but VALUE is regular expression\n    - LABEL<LABELX=`VALUE`>\n    # same, but check against \"LABELX\" (instead of \"LABEL\")\n

    Examples:

    plugins:\n  Aggregator:\n    # will aggregate metrics of the aggregate. The labels \"node\" and \"type\" are included in the new instances\n    - aggr node type\n    # aggregate instances if label \"type\" has value \"flexgroup\"\n    # include all original labels\n    - type<flexgroup> ...\n    # aggregate all instances if value of \"volume\" ends with underscore and 4 digits\n    - volume<`_\\d{4}$`>\n
    "},{"location":"plugins/#aggregation-rules","title":"Aggregation rules","text":"

    The plugin tries to intelligently aggregate metrics based on a few rules:

    • Sum - the default rule, if no other rules apply
    • Average - if any of the following is true:
      • metric name has suffix _percent or _percentage
      • metric name has prefix average_ or avg_
      • metric has property (metric.GetProperty()) percent or average
    • Weighted Average - applied if the metric has property average and suffix _latency and there is a matching _ops metric. (This currently only applies to ZapiPerf metrics, which use the Property field of metrics; see the sketch after this list.)
    • Ignore - metrics created by some plugins, such as value_to_num by LabelAgent
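
    As a minimal sketch of the weighted-average rule, with hypothetical volume names and values (not taken from a real cluster), a node-level latency is computed by weighting each volume's latency with its ops:

    # hypothetical input: two volumes on node-01\n#   vol1: volume_read_latency=10, volume_read_ops=100\n#   vol2: volume_read_latency=20, volume_read_ops=300\n# aggregated output for node-01 (aggregating on LABEL \"node\"):\n#   node_volume_read_ops     = 100 + 300 = 400\n#   node_volume_read_latency = (10*100 + 20*300) / (100 + 300) = 17.5\n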
    "},{"location":"plugins/#max","title":"Max","text":"

    Max creates a new collection of metrics (Matrix) by calculating the max of metric values from an existing Matrix for a given label. For example, if the collected metrics are for disks, you can create the max at the node or aggregate level. Refer to Max Examples for more details.

    "},{"location":"plugins/#max-rule-syntax","title":"Max Rule syntax","text":"

    simplest case:

    plugins:\n  Max:\n    - LABEL\n# create a new Matrix of max values on target label LABEL\n

    If you want to specify which labels should be included in the new instances, you can add those space-separated after LABEL:

    - LABEL LABEL1,LABEL2\n    # similar to the above example, but LABEL1 and LABEL2 will be copied into the new instances\n    # (default is to only copy LABEL and all global labels (such as cluster and datacenter))\n

    Or include all labels:

        - LABEL ...\n    # copy all labels of the original instance\n

    By default, metrics will be prefixed with LABEL. For example, if the object of the original Matrix is volume (meaning metrics are prefixed with volume_) and LABEL is aggr, then the metric volume_read_ops will become aggr_volume_read_ops. You can override this using the <>OBJ pattern shown below:

        - LABEL<>OBJ\n    # use OBJ as the object of the new matrix, e.g. if the original object is \"volume\" and you\n    # want to leave metric names unchanged, use \"volume\"\n

    Finally, sometimes you only want to generate instances with a specific label value. You can use <VALUE> for that (optionally followed by OBJ):

        - LABEL<VALUE>\n    # aggregate all instances if LABEL has value VALUE\n    - LABEL<`VALUE`>\n    # same, but VALUE is regular expression\n    - LABEL<LABELX=`VALUE`>\n    # same, but check against \"LABELX\" (instead of \"LABEL\")\n
    "},{"location":"plugins/#max-examples","title":"Max Examples","text":"
    plugins:\n  Max:\n    # will create max of each aggregate metric. All metrics will be prefixed with aggr_disk_max. All labels are included in the new instances\n    - aggr<>aggr_disk_max ...\n    # calculate max instances if label \"disk\" has value \"1.1.0\". Prefix with disk_max\n    # include all original labels\n    - disk<1.1.0>disk_max ...\n    # max of all instances if value of \"volume\" ends with underscore and 4 digits\n    - volume<`_\\d{4}$`>\n
    "},{"location":"plugins/#labelagent","title":"LabelAgent","text":"

    LabelAgent is used to manipulate instance labels based on rules. You can define multiple rules; here is an example of what you could add to the yaml file of a collector:

    plugins:\n  LabelAgent:\n    # our rules:\n    split: node `/` ,aggr,plex,disk\n    replace_regex: node node `^(node)_(\\d+)_.*$` `Node-$2`\n

    Note: rules that create a new label should use the name defined on the right side of the => rename operator in the template. If there is no rename, the left side of => is used.
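
    For illustration, here is a hypothetical template excerpt; some_api_field is a placeholder, not a real counter name. Because the counter is renamed with =>, the LabelAgent rule refers to style, not some_api_field:

    counters:\n  - ^some_api_field => style\n\nplugins:\n  LabelAgent:\n    exclude_equals:\n      - style `flexgroup_constituent`   # refer to \"style\", the right side of =>\n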

    "},{"location":"plugins/#split","title":"split","text":"

    Rule syntax:

    split:\n  - LABEL `SEP` LABEL1,LABEL2,LABEL3\n# source label - separator - comma-separated target labels\n

    Splits the value of a given label by separator SEP and creates new labels if the number of subvalues matches the number of target labels defined in the rule. To discard a subvalue, just add a redundant , in the list of target labels.

    Example:

    split:\n  - node `/` ,aggr,plex,disk\n# will split the value of \"node\" using separator \"/\"\n# will expect 4 values: first will be discarded, remaining\n# three will be stored as labels \"aggr\", \"plex\" and \"disk\"\n
    "},{"location":"plugins/#split_regex","title":"split_regex","text":"

    Does the same as split but uses a regular expression instead of a separator.

    Rule syntax:

    split_regex:\n  - LABEL `REGEX` LABEL1,LABEL2,LABEL3\n

    Example:

    split_regex:\n  - node `.*_(ag\\d+)_(p\\d+)_(d\\d+)` aggr,plex,disk\n# will look for \"_ag\", \"_p\", \"_d\", each followed by one\n# or more numbers, if there is a match, the submatches\n# will be stored as \"aggr\", \"plex\" and \"disk\"\n
    "},{"location":"plugins/#split_pairs","title":"split_pairs","text":"

    Rule syntax:

    split_pairs:\n  - LABEL `SEP1` `SEP2`\n# source label - pair separator - key-value separator\n

    Extracts key-value pairs from the value of source label LABEL. Note that you need to add these keys to the export options of the template, otherwise they will not be exported.

    Example:

    split_pairs:\n  - comment ` ` `:`\n# will split pairs using a single space and split key-values using colon\n# e.g. if comment=\"owner:jack contact:some@email\", the result will be\n# two new labels: owner=\"jack\" and contact=\"some@email\"\n
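
    To complement the example above, here is a hedged sketch of the matching export options (the exact export_options section of your template may differ); the owner and contact keys created by split_pairs are listed so they are exported:

    export_options:\n  instance_keys:\n    - owner\n    - contact\n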
    "},{"location":"plugins/#join","title":"join","text":"

    Join multiple label values using separator SEP and create a new label.

    Rule syntax:

    join:\n  - LABEL `SEP` LABEL1,LABEL2,LABEL3\n# target label - separator - comma-separated source labels\n

    Example:

    join:\n  - plex_long `_` aggr,plex\n# will look for the values of labels \"aggr\" and \"plex\",\n# if they are set, a new \"plex_long\" label will be added\n# by joining their values with \"_\"\n
    "},{"location":"plugins/#replace","title":"replace","text":"

    Substitute substring OLD with NEW in label SOURCE and store in TARGET. Note that target and source labels can be the same.

    Rule syntax:

    replace:\n  - SOURCE TARGET `OLD` `NEW`\n# source label - target label - substring to replace - replace with\n

    Example:

    replace:\n  - node node_short `node_` ``\n# this rule will just remove \"node_\" from all values of label\n# \"node\". E.g. if label is \"node_jamaica1\", it will rewrite it \n# as \"jamaica1\"\n
    "},{"location":"plugins/#replace_regex","title":"replace_regex","text":"

    Same as replace, but uses a regular expression instead of OLD. Note that you can use $n to reference the nth submatch in NEW.

    Rule syntax:

    replace_regex:\n  - SOURCE TARGET `REGEX` `NEW`\n# source label - target label - substring to replace - replace with\n

    Example:

    replace_regex:\n  - node node `^(node)_(\\d+)_.*$` `Node-$2`\n# if there is a match, will capitalize \"Node\" and remove suffixes.\n# E.g. if label is \"node_10_dc2\", it will be rewritten as \"Node-10\"\n
    "},{"location":"plugins/#exclude_equals","title":"exclude_equals","text":"

    Exclude each instance if the value of LABEL is exactly VALUE. Exclude means that metrics for this instance will not be exported.

    Rule syntax:

    exclude_equals:\n  - LABEL `VALUE`\n# label name - label value\n

    Example:

    exclude_equals:\n  - vol_type `flexgroup_constituent`\n# all instances, which have label \"vol_type\" with value\n# \"flexgroup_constituent\" will not be exported\n
    "},{"location":"plugins/#exclude_contains","title":"exclude_contains","text":"

    Same as exclude_equals, but all instances whose label contains VALUE will be excluded.

    Rule syntax:

    exclude_contains:\n  - LABEL `VALUE`\n# label name - label value\n

    Example:

    exclude_contains:\n  - vol_type `flexgroup_`\n# all instances, which have label \"vol_type\" which contain\n# \"flexgroup_\" will not be exported\n
    "},{"location":"plugins/#exclude_regex","title":"exclude_regex","text":"

    Same as exclude_equals, but will use a regular expression and all matching instances will be excluded.

    Rule syntax:

    exclude_regex:\n  - LABEL `REGEX`\n# label name - regular expression\n

    Example:

    exclude_regex:\n  - vol_type `^flex`\n# all instances, which have label \"vol_type\" which starts with\n# \"flex\" will not be exported\n
    "},{"location":"plugins/#include_equals","title":"include_equals","text":"

    Include each instance if the value of LABEL is exactly VALUE. Include means that metrics for this instance will be exported and instances that do not match will not be exported.

    Rule syntax:

    include_equals:\n  - LABEL `VALUE`\n# label name - label value\n

    Example:

    include_equals:\n  - vol_type `flexgroup_constituent`\n# all instances, which have label \"vol_type\" with value\n# \"flexgroup_constituent\" will be exported\n
    "},{"location":"plugins/#include_contains","title":"include_contains","text":"

    Same as include_equals, but all instances whose label contains VALUE will be included.

    Rule syntax:

    include_contains:\n  - LABEL `VALUE`\n# label name - label value\n

    Example:

    include_contains:\n  - vol_type `flexgroup_`\n# all instances, which have label \"vol_type\" which contain\n# \"flexgroup_\" will be exported\n
    "},{"location":"plugins/#include_regex","title":"include_regex","text":"

    Same as include_equals, but a regular expression will be used for inclusion. Similar to the other includes, all matching instances will be included and all non-matching will not be exported.

    Rule syntax:

    include_regex:\n  - LABEL `REGEX`\n# label name - regular expression\n

    Example:

    include_regex:\n  - vol_type `^flex`\n# all instances, which have label \"vol_type\" which starts with\n# \"flex\" will be exported\n
    "},{"location":"plugins/#value_mapping","title":"value_mapping","text":"

    value_mapping was deprecated in 21.11 and removed in 22.02. Use value_to_num mapping instead.

    "},{"location":"plugins/#value_to_num","title":"value_to_num","text":"

    Map values of a given label to a numeric metric (of type uint8). Healthy values are mapped to 1 and all non-healthy values are mapped to 0.

    This is handy to manipulate the data in the DB or Grafana (e.g. change color based on status or create an alert).

    Note that you don't define the numeric values yourself; instead, you only provide the possible (expected) values and the plugin will map each value to its index in the rule.

    Rule syntax:

    value_to_num:\n  - METRIC LABEL ZAPI_VALUE REST_VALUE `N`\n# map values of LABEL to 1 if it is ZAPI_VALUE or REST_VALUE\n# otherwise, value of METRIC is set to N\n

    The default value N is optional; if no default value is given and the label value does not match any of the given values, the metric value will not be set.

    Examples:

    value_to_num:\n  - status state up online `0`\n# a new metric will be created with the name \"status\"\n# if an instance has label \"state\" with value \"up\", the metric value will be 1,\n# if it's \"online\", the value will be set to 1,\n# if it's any other value, it will be set to the specified default, 0\n
    value_to_num:\n  - status state up online `4`\n# metric value will be set to 1 if \"state\" is \"up\", otherwise to **4**\n
    value_to_num:\n  - status outage - - `0` #ok_value is empty value. \n# metric value will be set to 1 if \"outage\" is empty, if it's any other value, it will be set to the default, 0\n# '-' is a special symbol in this mapping, and it will be converted to blank while processing.\n
    "},{"location":"plugins/#value_to_num_regex","title":"value_to_num_regex","text":"

    Same as value_to_num, but will use a regular expression. All matches are mapped to 1 and non-matches are mapped to 0.

    This is handy to manipulate the data in the DB or Grafana (e.g. change color based on status or create an alert).

    Note that you don't define the numeric values; instead, you provide the expected values and the plugin will map each value to its index in the rule.

    Rule syntax:

    value_to_num_regex:\n  - METRIC LABEL ZAPI_REGEX REST_REGEX `N`\n# map values of LABEL to 1 if it matches ZAPI_REGEX or REST_REGEX\n# otherwise, value of METRIC is set to N\n

    The default value N is optional; if no default value is given and the label value does not match any of the given values, the metric value will not be set.

    Examples:

    value_to_num_regex:\n  - certificateuser methods .*cert.*$ .*certificate.*$ `0`\n# a new metric will be created with the name \"certificateuser\"\n# if an instance has label \"methods\" whose value contains \"cert\", the metric value will be 1,\n# if the value contains \"certificate\", the value will be set to 1,\n# if the value contains neither \"cert\" nor \"certificate\", it will be set to the specified default, 0\n
    value_to_num_regex:\n  - status state ^up$ ^ok$ `4`\n# metric value will be set to 1 if label \"state\" matches regex, otherwise set to **4**\n
    "},{"location":"plugins/#metricagent","title":"MetricAgent","text":"

    MetricAgent is used to manipulate metrics based on rules. You can define multiple rules; here is an example of what you could add to the yaml file of a collector:

    plugins:\n  MetricAgent:\n    compute_metric:\n      - snapshot_maxfiles_possible ADD snapshot.max_files_available snapshot.max_files_used\n      - raid_disk_count ADD block_storage.primary.disk_count block_storage.hybrid_cache.disk_count\n

    Note: Metric names used to create new metrics can come from the left or right side of the rename operator (=>). Note: The metric agent currently does not work for histogram or array metrics.

    "},{"location":"plugins/#compute_metric","title":"compute_metric","text":"

    This rule creates a new metric (of type float64) using the provided scalar or an existing metric value combined with a mathematical operation.

    You can provide a numeric value or a metric name with an operation. The plugin will use the provided number or fetch the value of a given metric, perform the requested mathematical operation, and store the result in a new custom metric.

    Currently, we support these operations: ADD SUBTRACT MULTIPLY DIVIDE PERCENT

    Rule syntax:

    compute_metric:\n  - METRIC OPERATION METRIC1 METRIC2 METRIC3\n# target new metric - mathematical operation - input metric names \n# apply OPERATION on metric values of METRIC1, METRIC2 and METRIC3 and set result in METRIC\n# METRIC1, METRIC2, METRIC3 can be a scalar or an existing metric name.\n

    Examples:

    compute_metric:\n  - space_total ADD space_available space_used\n# a new metric will be created with the name \"space_total\"\n# if an instance has metric \"space_available\" with value \"1000\", and \"space_used\" with value \"400\",\n# the result value will be \"1400\" and set to metric \"space_total\".\n
    compute_metric:\n  - disk_count ADD primary.disk_count secondary.disk_count hybrid.disk_count\n# value of metric \"disk_count\" would be addition of all the given disk_counts metric values.\n# disk_count = primary.disk_count + secondary.disk_count + hybrid.disk_count\n
    compute_metric:\n  - files_available SUBTRACT files files_used\n# value of metric \"files_available\" would be subtraction of the metric value of files_used from metric value of files.\n# files_available = files - files_used\n
    compute_metric:\n  - total_bytes MULTIPLY bytes_per_sector sector_count\n# value of metric \"total_bytes\" would be multiplication of metric value of bytes_per_sector and metric value of sector_count.\n# total_bytes = bytes_per_sector * sector_count\n
    compute_metric:\n  - uptime MULTIPLY stats.power_on_hours 60 60\n# value of metric \"uptime\" would be multiplication of metric value of stats.power_on_hours and scalar value of 60 * 60.\n# uptime = stats.power_on_hours * 60 * 60\n
    compute_metric:\n  - transmission_rate DIVIDE transfer.bytes_transferred transfer.total_duration\n# value of metric \"transmission_rate\" would be division of metric value of transfer.bytes_transferred by metric value of transfer.total_duration.\n# transmission_rate = transfer.bytes_transferred / transfer.total_duration\n
    compute_metric:\n  - inode_used_percent PERCENT inode_files_used inode_files_total\n# a new metric named \"inode_used_percent\" will be created by dividing the metric \"inode_files_used\" by \n#  \"inode_files_total\" and multiplying the result by 100.\n# inode_used_percent = inode_files_used / inode_files_total * 100\n
    "},{"location":"plugins/#changelog","title":"ChangeLog","text":"

    The ChangeLog plugin is a feature of Harvest, designed to detect and track changes related to the creation, modification, and deletion of an object. By default, it supports volume, svm, and node objects. Its functionality can be extended to track changes in other objects by making relevant changes in the template.

    Please note that the ChangeLog plugin requires the uuid label, which is unique, to be collected by the template. Without the uuid label, the plugin will not function.
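
    For reference, here is a hypothetical REST template excerpt that collects the uuid label alongside a couple of other fields (the exact counters in your template will differ); ^^ marks the instance key and ^ marks a label:

    counters:\n  - ^^uuid\n  - ^name => volume\n  - ^svm.name => svm\n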

    The ChangeLog feature only detects changes when Harvest is up and running. It does not detect changes that occur when Harvest is down. Additionally, the plugin does not detect changes in metric values by default, but it can be configured to do so.

    "},{"location":"plugins/#enabling-the-plugin","title":"Enabling the Plugin","text":"

    The plugin can be enabled in the templates under the plugins section.

    For volume, svm, and node objects, you can enable the plugin with the following configuration:

    plugins:\n  - ChangeLog\n

    For other objects, you need to specify the labels to track in the plugin configuration. These labels should be relevant to the object you want to track. If these labels are not specified in the template, the plugin will not be able to track changes for the object.

    Here's an example of how to enable the plugin for an aggregate object:

    plugins:\n  - ChangeLog:\n      track:\n        - aggr\n        - node\n        - state\n

    In the above configuration, the plugin will track changes in the aggr, node, and state labels for the aggregate object.

    "},{"location":"plugins/#default-tracking-for-svm-node-volume","title":"Default Tracking for svm, node, volume","text":"

    By default, the plugin tracks changes in the following labels for svm, node, and volume objects:

    • svm: svm, state, type, anti_ransomware_state
    • node: node, location, healthy
    • volume: node, volume, svm, style, type, aggr, state, status

    Other objects are not tracked by default.

    These default settings can be overwritten as needed in the relevant templates. For instance, if you want to track the junction_path label and the size_total metric for volumes, you can overwrite this in the volume template.

    plugins:\n  - ChangeLog:\n      track:\n        - node\n        - volume\n        - svm\n        - style\n        - type\n        - aggr\n        - state\n        - status\n        - junction_path\n        - size_total\n
    "},{"location":"plugins/#change-types-and-metrics","title":"Change Types and Metrics","text":"

    The ChangeLog plugin publishes a metric with various labels providing detailed information about the change when an object is created, modified, or deleted.

    "},{"location":"plugins/#object-creation","title":"Object Creation","text":"

    When a new object is created, the ChangeLog plugin will publish a metric with the following labels:

    • object - name of the ONTAP object that was changed
    • op - type of change that was made
    • metric value - timestamp when Harvest captured the change (1698735558 in the example below)

    Example of metric shape for object creation:

    change_log{aggr=\"umeng_aff300_aggr2\", cluster=\"umeng-aff300-01-02\", datacenter=\"u2\", index=\"0\", instance=\"localhost:12993\", job=\"prometheus\", node=\"umeng-aff300-01\", object=\"volume\", op=\"create\", style=\"flexvol\", svm=\"harvest\", volume=\"harvest_demo\"} 1698735558\n
    "},{"location":"plugins/#object-modification","title":"Object Modification","text":"

    When an existing object is modified, the ChangeLog plugin will publish a metric with the following labels:

    • object - name of the ONTAP object that was changed
    • op - type of change that was made
    • track - property of the object which was modified
    • new_value - new value of the object after the change (only available for label changes, not for metric changes)
    • old_value - previous value of the object before the change (only available for label changes, not for metric changes)
    • metric value - timestamp when Harvest captured the change (1698735677 in the example below)
    • category - type of the change, indicating whether it is a metric or a label change

    Example of metric shape for object modification (label change):

    change_log{aggr=\"umeng_aff300_aggr2\", category=\"label\", cluster=\"umeng-aff300-01-02\", datacenter=\"u2\", index=\"1\", instance=\"localhost:12993\", job=\"prometheus\", new_value=\"offline\", node=\"umeng-aff300-01\", object=\"volume\", old_value=\"online\", op=\"update\", style=\"flexvol\", svm=\"harvest\", track=\"state\", volume=\"harvest_demo\"} 1698735677\n

    Example of metric shape for metric value change:

    change_log{aggr=\"umeng_aff300_aggr2\", category=\"metric\", cluster=\"umeng-aff300-01-02\", datacenter=\"u2\", index=\"3\", instance=\"localhost:12993\", job=\"prometheus\", node=\"umeng-aff300-01\", object=\"volume\", op=\"metric_change\", track=\"volume_size_total\", svm=\"harvest\", volume=\"harvest_demo\"} 1698735800\n
    "},{"location":"plugins/#object-deletion","title":"Object Deletion","text":"

    When an object is deleted, the ChangeLog plugin will publish a metric with the following labels:

    • object - name of the ONTAP object that was changed
    • op - type of change that was made
    • metric value - timestamp when Harvest captured the change (1698735708 in the example below)

    Example of metric shape for object deletion:

    change_log{aggr=\"umeng_aff300_aggr2\", cluster=\"umeng-aff300-01-02\", datacenter=\"u2\", index=\"2\", instance=\"localhost:12993\", job=\"prometheus\", node=\"umeng-aff300-01\", object=\"volume\", op=\"delete\", style=\"flexvol\", svm=\"harvest\", volume=\"harvest_demo\"} 1698735708\n
    "},{"location":"plugins/#viewing-the-metrics","title":"Viewing the Metrics","text":"

    You can view the metrics published by the ChangeLog plugin in the ChangeLog Monitor dashboard in Grafana. This dashboard provides a visual representation of the changes tracked by the plugin for volume, svm, and node objects.

    "},{"location":"prepare-7mode-clusters/","title":"ONTAP 7mode","text":"

    NetApp Harvest requires login credentials to access monitored hosts. Although a generic admin account can be used, it is best practice to create a dedicated monitoring account with least-privilege access.

    ONTAP 7-mode supports only username/password-based authentication with NetApp Harvest. Harvest communicates with monitored systems exclusively via HTTPS, which is not enabled by default in Data ONTAP 7-mode. Log in as a user with full administrative privileges and execute the following steps.

    "},{"location":"prepare-7mode-clusters/#enabling-https-and-tls-ontap-7-mode-only","title":"Enabling HTTPS and TLS (ONTAP 7-mode only)","text":"

    Verify SSL is configured

    secureadmin status ssl\n

    If ssl is \u2018active\u2019, continue. If not, set up SSL and be sure to choose a Key length (bits) of 2048:

    secureadmin setup ssl\n
    SSL Setup has already been done before. Do you want to proceed? [no] yes\nCountry Name (2 letter code) [US]: NL\nState or Province Name (full name) [California]: Noord-Holland\nLocality Name (city, town, etc.) [Santa Clara]: Schiphol\nOrganization Name (company) [Your Company]: NetApp\nOrganization Unit Name (division): SalesEngineering\nCommon Name (fully qualified domain name) [sdt-7dot1a.nltestlab.hq.netapp.com]:\nAdministrator email: noreply@netapp.com\nDays until expires [5475] :5475 Key length (bits) [512] :2048\n

    Enable management via SSL and enable TLS

    options httpd.admin.ssl.enable on\noptions tls.enable on  \n
    "},{"location":"prepare-7mode-clusters/#creating-ontap-user","title":"Creating ONTAP user","text":""},{"location":"prepare-7mode-clusters/#create-the-role-with-required-capabilities","title":"Create the role with required capabilities","text":"
    useradmin role add netapp-harvest-role -c \"Role for performance monitoring by NetApp Harvest\" -a login-http-admin,api-system-get-version,api-system-get-info,api-perf-object-*,api-ems-autosupport-log\n
    "},{"location":"prepare-7mode-clusters/#create-a-group-for-this-role","title":"Create a group for this role","text":"
    useradmin group add netapp-harvest-group -c \"Group for performance monitoring by NetApp Harvest\" -r netapp-harvest-role \n
    "},{"location":"prepare-7mode-clusters/#create-a-user-for-the-role-and-enter-the-password-when-prompted","title":"Create a user for the role and enter the password when prompted","text":"
    useradmin user add netapp-harvest -c \"User account for performance monitoring by NetApp Harvest\" -n \"NetApp Harvest\" -g netapp-harvest-group\n

    The user is now created and can be configured for use by NetApp Harvest.

    "},{"location":"prepare-cdot-clusters/","title":"ONTAP cDOT","text":""},{"location":"prepare-cdot-clusters/#prepare-ontap-cdot-cluster","title":"Prepare ONTAP cDOT cluster","text":"

    NetApp Harvest requires login credentials to access monitored hosts. Although a generic admin account can be used, it is better to create a dedicated read-only monitoring account.

    In the examples below, the user, group, roles, etc., use a naming convention of netapp-harvest. These can be modified as needed to match your organizational needs.

    There are a few steps required to prepare each system for monitoring. Harvest supports two authentication styles (auth_style) to connect to ONTAP clusters: basic_auth and certificate_auth. Both work well, but if you're starting fresh, the recommendation is to create a read-only harvest user on your ONTAP server and use certificate-based TLS authentication.

    Here's a summary of what we're going to do:

    1. Create a read-only ONTAP role with the necessary capabilities that Harvest will use to authenticate and collect data
    2. Create a user account using the role created in step #1
    3. Update the harvest.yml file to use the user account and password created in step #2 and start Harvest (a sample poller entry follows this list).
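
    As a minimal sketch of step #3, here is a hypothetical harvest.yml poller entry using basic_auth; the poller name, datacenter, address, password, and collector list are assumptions to adapt to your environment:

    Pollers:\n  u2:\n    datacenter: dc-1\n    addr: 10.0.1.1\n    auth_style: basic_auth\n    username: harvest2\n    password: your-password\n    collectors:\n      - Rest\n      - RestPerf\n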

    There are two ways to create a read-only ONTAP role. Pick the one that best fits your needs.

    • Create a role with read-only access to all API objects via System Manager.
    • Create a role with read-only access to the limited set of APIs Harvest collects via ONTAP's command line interface (CLI).
    "},{"location":"prepare-cdot-clusters/#system-manager","title":"System Manager","text":"

    Open System Manager. Click on CLUSTER in the left menu bar, then Settings and Users and Roles.

    In the right column, under Roles, click on Add to add a new role.

    Choose a role name (e.g. harvest2-role). In the REST API PATH field, type /api and select Read-Only for ACCESS. Click on Save.

    In the left column, under Users, click on Add to create a new user. Choose a username. Under Role, select the role that we just created. Under User Login Methods select ONTAPI, and one of the two authentication methods. Press the Add button and select HTTP and one of the authentication methods. Type in a password if you chose Password. Click on Save

    If you chose Password, you can add the username and password to the Harvest configuration file and start Harvest. If you chose Certificate jump to Using Certificate Authentication to generate certificates files.

    System Manager Classic interface

    Open System Manager. Click on the Settings icon in the top-right corner of the window.

    Click on Roles in the left menu bar and click Add. Choose a role name (e.g. harvest2-role).

    Under Role Attributes click on Add, under Command type DEFAULT, leave Query empty, select readonly under Access Level, click on OK and Add.

    After you click on Add, this is what you should see:

    Now we need to create a user. Click on Users in the left menu bar and Add. Choose a username and password. Under User Login Methods click on Add, select ontapi as Application and select the role that we just created as Role. Repeat by clicking on Add, select http as Application and select the role that we just created as Role. Click on Add in the pop-up window to save.

    "},{"location":"prepare-cdot-clusters/#ontap-cli","title":"ONTAP CLI","text":"

    We are going to:

    1. create a Harvest role with read-only access to a limited set of objects
    2. create a Harvest user and assign it to that role

    Log in to the CLI of your cDOT ONTAP system using SSH.

    "},{"location":"prepare-cdot-clusters/#least-privilege-approach","title":"Least-privilege approach","text":"

    Verify there are no errors when you copy/paste these. Warnings are fine.

    security login role create -role harvest2-role -access readonly -cmddirname \"cluster\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"event notification destination show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"event notification destination\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"lun\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"metrocluster configuration-settings mediator add\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"network fcp adapter show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"network interface\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"network port show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"network route show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"qos adaptive-policy-group\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"qos policy-group\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"qos workload show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"security\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"snapmirror\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"statistics\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"storage aggregate\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"storage disk\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"storage encryption disk\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"storage failover show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"storage iscsi-initiator show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"storage shelf\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"system chassis fru show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"system health alert show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"system health status show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"system health subsystem show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"system license show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"system node\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"system service-processor show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"version\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"volume\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"vserver\"\n
    "},{"location":"prepare-cdot-clusters/#create-harvest-user-and-associate-with-the-harvest-role","title":"Create harvest user and associate with the harvest role","text":"

    Use this for password authentication

    # If the harvest2 user does not exist, you will be prompted to enter a password\nsecurity login create -user-or-group-name harvest2 -application ontapi -role harvest2-role -authentication-method password\n

    Or this for certificate authentication

    security login create -user-or-group-name harvest2 -application ontapi -role harvest2-role -authentication-method cert\n
    "},{"location":"prepare-cdot-clusters/#create-rest-role","title":"Create REST role","text":"

    Replace $ADMIN_VSERVER with your SVM admin name.

    security login rest-role create -role harvest2-rest-role -access readonly -api /api -vserver $ADMIN_VSERVER\n
    Least-privilege approach for REST

    If you are on ONTAP version 9.14.X or later, instead of the above command, you can use the following commands to create a REST role with read-only access to a limited set of objects.

    Since REST roles are defined in terms of legacy roles, if you have already created a legacy role with the same name, you will need to delete it first or use a different name.

    security login rest-role create -role harvest-rest-role -access readonly -api /api/cloud/targets\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster/counter/tables\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster/mediators\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster/metrocluster/diagnostics\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster/nodes\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster/ntp/servers\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster/peers\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster/sensors\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/name-services/ldap\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/name-services/nis\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/network/ethernet/ports\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/network/fc/ports\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/network/ip/interfaces\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/network/ip/ports\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/network/ip/routes\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/support/alerts\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/cifs/services\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/cifs/sessions\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/cifs/shares\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/locks\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/ndmp/sessions\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/nfs/connected-clients\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/nfs/export-policies\n# s3 is buggy in 9.15, use protocols endpoint instead. 
See https://mysupport.netapp.com/site/bugs-online/product/ONTAP/JiraNgage/CONTAP-210232\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols\n# security login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/s3/buckets\n# security login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/s3/services\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/security\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/security/accounts\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/security/audit/destinations\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/security/certificates\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/security/login/messages\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/security/ssh\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/snapmirror/relationships\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/aggregates\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/disks\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/flexcache/flexcaches\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/luns\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/namespaces\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/qtrees\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/qos/policies\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/qos/workloads\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/quota/reports\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/shelves\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/volumes\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/support/auto-update\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/support/autosupport\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/support/ems/destinations\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/support/ems/events\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/support/ems/messages\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/svm/peers\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/svm/svms\n\n# Private CLI endpoints\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/aggr\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/cluster/date\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/disk\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/network/interface\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/node\nsecurity login rest-role create 
-role harvest-rest-role -access readonly -api /api/private/cli/qos/adaptive-policy-group\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/qos/policy-group\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/qos/workload\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/snapmirror\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/snapshot/policy\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/storage/failover\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/storage/shelf\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/system/chassis/fru\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/system/health/subsystem\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/volume\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/vserver\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/vserver/object-store-server/bucket/policy\n

    See #2991 for more information.

    "},{"location":"prepare-cdot-clusters/#associate-rest-role-with-harvest-user","title":"Associate REST role with harvest user","text":"

    Using password authentication

    security login create -user-or-group-name harvest2 -application http -role harvest2-rest-role -authentication-method password\n
    If you get an error command failed: duplicate entry when running the previous command

    Remove the previous entry and recreate like so:

    security login delete -user-or-group-name harvest2 -application http -authentication-method *\nsecurity login create -user-or-group-name harvest2 -application http -role harvest2-rest-role -authentication-method password\n

    Using certificate authentication

    security login create -user-or-group-name harvest2 -application http -role harvest2-rest-role -authentication-method cert\n
    If you get an error command failed: duplicate entry when running the previous command

    Remove the previous entry and recreate like so:

    security login delete -user-or-group-name harvest2 -application http -authentication-method *\nsecurity login create -user-or-group-name harvest2 -application http -role harvest2-rest-role -authentication-method cert\n
    "},{"location":"prepare-cdot-clusters/#verify-that-the-harvest-role-has-web-access","title":"Verify that the harvest role has web access","text":"
    vserver services web access show -role harvest2-role -name ontapi\nvserver services web access show -role harvest2-rest-role -name rest\nvserver services web access show -role harvest2-rest-role -name docs-api\n

    If any entries are missing, enable access by running the following. Replace $ADMIN_VSERVER with your SVM admin name.

    vserver services web access create -vserver $ADMIN_VSERVER -name ontapi -role harvest2-role\nvserver services web access create -vserver $ADMIN_VSERVER -name rest -role harvest2-rest-role\nvserver services web access create -vserver $ADMIN_VSERVER -name docs-api -role harvest2-rest-role\n

    "},{"location":"prepare-cdot-clusters/#7-mode-cli","title":"7-Mode CLI","text":"

    Log in to the CLI of your 7-Mode ONTAP system (e.g. using SSH). First, we create a user role. If you want to give the user readonly access to all API objects, type in the following command:

    useradmin role modify harvest2-role -a login-http-admin,api-system-get-version, \\\napi-system-get-info,api-perf-object-*,api-ems-autosupport-log,api-diagnosis-status-get, \\\napi-lun-list-info,api-diagnosis-subsystem-config-get-iter,api-disk-list-info, \\\napi-diagnosis-config-get-iter,api-aggr-list-info,api-volume-list-info, \\\napi-storage-shelf-environment-list-info,api-qtree-list,api-quota-report\n
    "},{"location":"prepare-cdot-clusters/#using-certificate-authentication","title":"Using Certificate Authentication","text":"

    See comments here for troubleshooting client certificate authentication.

    Client certificate authentication allows you to authenticate with your ONTAP cluster without including username/passwords in your harvest.yml file. The process to set up client certificates is straightforward, although self-signed certificates introduce more work, as does Go's strict treatment of common names.

    Unless you've installed production certificates on your ONTAP cluster, you'll need to replace your cluster's common-name-based self-signed certificates with a subject alternative name-based certificate. After that step is completed, we'll create client certificates and add those for passwordless login.

    If you can't or don't want to replace your ONTAP cluster certificates, there are some workarounds. You can

    • Use use_insecure_tls: true in your harvest.yml to disable certificate verification (see the sketch after this list)
    • Change your harvest.yml to connect via hostname instead of IP address
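
    For the first workaround, here is a hypothetical poller entry that keeps basic_auth but skips certificate verification; the poller name and address are placeholders, and this should be treated as a stop-gap rather than a recommended configuration:

    Pollers:\n  u2-insecure:\n    addr: 10.0.1.1\n    auth_style: basic_auth\n    username: harvest2\n    use_insecure_tls: true\n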
    "},{"location":"prepare-cdot-clusters/#create-self-signed-subject-alternate-name-certificates-for-ontap","title":"Create Self-Signed Subject Alternate Name Certificates for ONTAP","text":"

    Subject alternate name (SAN) certificates allow multiple hostnames in a single certificate. Starting with Go 1.3, when connecting to a cluster via its IP address, the CN field in the server certificate is ignored. This often causes errors like this: x509: cannot validate certificate for 127.0.0.1 because it doesn't contain any IP SANs

    "},{"location":"prepare-cdot-clusters/#overview-of-steps-to-create-a-self-signed-san-certificate-and-make-ontap-use-it","title":"Overview of steps to create a self-signed SAN certificate and make ONTAP use it","text":"
    1. Create a root key
    2. Create a root certificate authority certificate
    3. Create a SAN certificate for your ONTAP cluster, using #2 to create it
    4. Install root ca certificate created in step #2 on cluster
    5. Install SAN certificate created in step #3 on your cluster
    6. Modify your cluster/SVM to use the new certificate installed at step #5
    "},{"location":"prepare-cdot-clusters/#setup","title":"Setup","text":"
    # create a place to store the certificate authority files, adjust as needed\nmkdir -p ca/{private,certs}\n
    "},{"location":"prepare-cdot-clusters/#create-a-root-key","title":"Create a root key","text":"
    cd ca\n# generate a private key that we will use to create our self-signed certificate authority\nopenssl genrsa -out private/ca.key.pem 4096\nchmod 400 private/ca.key.pem\n
    "},{"location":"prepare-cdot-clusters/#create-a-root-certificate-authority-certificate","title":"Create a root certificate authority certificate","text":"

    Download the sample openssl.cnf file and put it in the directory we created in setup. Edit line 9, changing dir to point to your ca directory created in setup.

    openssl req -config openssl.cnf -key private/ca.key.pem -new -x509 -days 7300 -sha256 -extensions v3_ca -out certs/ca.cert.pem\n\n# Verify\nopenssl x509 -noout -text -in certs/ca.cert.pem\n\n# Make sure these are present\n    Signature Algorithm: sha256WithRSAEncryption               <======== Signature Algorithm can not be sha-1\n        X509v3 extensions:\n            X509v3 Subject Key Identifier: \n                --removed\n            X509v3 Authority Key Identifier: \n                --removed\n\n            X509v3 Basic Constraints: critical\n                CA:TRUE                                        <======== CA must be true\n            X509v3 Key Usage: critical\n                Digital Signature, Certificate Sign, CRL Sign  <======== Digital and certificate signature\n
    "},{"location":"prepare-cdot-clusters/#create-a-san-certificate-for-your-ontap-cluster","title":"Create a SAN certificate for your ONTAP cluster","text":"

    First, we'll create the certificate signing request and then the certificate. In this example, the ONTAP cluster is named umeng-aff300-05-06, update accordingly.

    Download the sample server_cert.cnf file and put it in the directory we created in setup. Edit lines 18-21 to include your ONTAP cluster hostnames and IP addresses. Edit lines 6-11 with new names as needed.

    openssl req -new -newkey rsa:4096 -nodes -sha256 -subj \"/\" -config server_cert.cnf -outform pem -out umeng-aff300-05-06.csr -keyout umeng-aff300-05-06.key\n\n# Verify\nopenssl req -text -noout -in umeng-aff300-05-06.csr\n\n# Make sure these are present\n        Attributes:\n        Requested Extensions:\n            X509v3 Subject Alternative Name:         <======== Section that lists alternate DNS and IP names\n                DNS:umeng-aff300-05-06-cm.rtp.openenglab.netapp.com, DNS:umeng-aff300-05-06, IP Address:10.193.48.11, IP Address:10.193.48.11\n    Signature Algorithm: sha256WithRSAEncryption     <======== Signature Algorithm can not be sha-1\n

    We'll now use the certificate signing request and the recently created certificate authority to create a new SAN certificate for our cluster.

    openssl x509 -req -sha256 -days 30 -in umeng-aff300-05-06.csr -CA certs/ca.cert.pem -CAkey private/ca.key.pem -CAcreateserial -out umeng-aff300-05-06.crt -extensions req_ext -extfile server_cert.cnf\n\n# Verify\nopenssl x509 -text -noout -in umeng-aff300-05-06.crt\n\n# Make sure these are present\nX509v3 extensions:\n            X509v3 Subject Alternative Name:       <======== Section that lists alternate DNS and IP names\n                DNS:umeng-aff300-05-06-cm.rtp.openenglab.netapp.com, DNS:umeng-aff300-05-06, IP Address:10.193.48.11, IP Address:10.193.48.11\n    Signature Algorithm: sha256WithRSAEncryption   <======== Signature Algorithm can not be sha-1\n
    "},{"location":"prepare-cdot-clusters/#install-root-ca-certificate-on-cluster","title":"Install Root CA Certificate On Cluster","text":"

    Log in to your cluster with admin credentials and install the server certificate authority. Copy from ca/certs/ca.cert.pem

    ssh admin@IP\numeng-aff300-05-06::*> security certificate install -type server-ca\n\nPlease enter Certificate: Press <Enter> when done\n-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n\nYou should keep a copy of the CA-signed digital certificate for future reference.\n\nThe installed certificate's CA and serial number for reference:\nCA: ntap\nSerial: 46AFFC7A3A9999999E8FB2FEB0\n\nThe certificate's generated name for reference: ntap\n

    Now install the server certificate we created above with SAN. Copy certificate from ca/umeng-aff300-05-06.crt and private key from ca/umeng-aff300-05-06.key

    umeng-aff300-05-06::*> security certificate install -type server\n\nPlease enter Certificate: Press <Enter> when done\n-----BEGIN CERTIFICATE-----\n..\n-----END CERTIFICATE-----\n\nPlease enter Private Key: Press <Enter> when done\n-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n\nPlease enter certificates of Certification Authorities (CA) which form the certificate chain of the server certificate. This starts with the issuing CA certificate of the server certificate and can range up to the root CA certificate.\n\nDo you want to continue entering root and/or intermediate certificates {y|n}: n\n

    If ONTAP tells you the provided certificate does not have a common name in the subject field, type the hostname of the cluster like this:

    The provided certificate does not have a common name in the subject field.\n\nEnter a valid common name to continue installation of the certificate:\n\nEnter a valid common name to continue installation of the certificate: umeng-aff300-05-06-cm.rtp.openenglab.netapp.com\n\nYou should keep a copy of the private key and the CA-signed digital certificate for future reference.\n\nThe installed certificate's CA and serial number for reference:\nCA: ntap\nSerial: 67A94AA25B229A68AC5BABACA8939A835AA998A58\n\nThe certificate's generated name for reference: umeng-aff300-05-06-cm.rtp.openenglab.netapp.com\n
    "},{"location":"prepare-cdot-clusters/#modify-the-admin-svm-to-use-the-new-certificate","title":"Modify the admin SVM to use the new certificate","text":"

    We'll modify the cluster's admin SVM to use the just-installed server certificate and certificate authority.

    vserver show -type admin -fields vserver,type\nvserver            type\n------------------ -----\numeng-aff300-05-06 admin\n\numeng-aff300-05-06::*> ssl modify -vserver umeng-aff300-05-06 -server-enabled true -serial 67A94AA25B229A68AC5BABACA8939A835AA998A58 -ca ntap\n  (security ssl modify)\n

    You can verify the certificate(s) are installed and working by using openssl like so:

    openssl s_client -CAfile certs/ca.cert.pem -showcerts -servername server -connect umeng-aff300-05-06-cm.rtp.openenglab.netapp.com:443\n\nCONNECTED(00000005)\ndepth=1 C = US, ST = NC, L = RTP, O = ntap, OU = ntap\nverify return:1\ndepth=0 \nverify return:1\n...\n

    Without the -CAfile option, openssl will report:

    CONNECTED(00000005)\ndepth=0 \nverify error:num=20:unable to get local issuer certificate\nverify return:1\ndepth=0 \nverify error:num=21:unable to verify the first certificate\nverify return:1\n---\n
    "},{"location":"prepare-cdot-clusters/#create-client-certificates-for-password-less-login","title":"Create Client Certificates for Password-less Login","text":"

    Copy the server certificate we created above into the Harvest install directory.

    cp ca/umeng-aff300-05-06.crt /opt/harvest\ncd /opt/harvest\n

    Create a self-signed client key and certificate with the same name as the hostname where Harvest is running. It's not required to name the key/cert pair after the hostname, but if you do, Harvest will load them automatically when you specify auth_style: certificate_auth; otherwise, you can point to them directly. See Pollers for details.

    Change the common name to the ONTAP user you set up with the harvest role above, e.g. harvest2.

    cd /opt/harvest\nmkdir cert\nopenssl req -x509 -nodes -days 1095 -newkey rsa:2048 -keyout cert/$(hostname).key -out cert/$(hostname).pem -subj \"/CN=harvest2\"\n
    "},{"location":"prepare-cdot-clusters/#install-client-certificates-on-cluster","title":"Install Client Certificates on Cluster","text":"

    Log in to your cluster with admin credentials and install the client certificate. Copy from cert/$(hostname).pem

    ssh admin@IP\numeng-aff300-05-06::*>  security certificate install -type client-ca -vserver umeng-aff300-05-06\n\nPlease enter Certificate: Press <Enter> when done\n-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n\nYou should keep a copy of the CA-signed digital certificate for future reference.\n\nThe installed certificate's CA and serial number for reference:\nCA: cbg\nSerial: B77B59444444CCCC\n\nThe certificate's generated name for reference: cbg_B77B59444444CCCC\n

    Now that the client certificate is installed, let's enable it.

    umeng-aff300-05-06::*> ssl modify -vserver umeng-aff300-05-06 -client-enabled true\n  (security ssl modify)\n

    Verify with a recent version of curl. If you are running on a Mac, see below.

    curl --cacert umeng-aff300-05-06.crt --key cert/$(hostname).key --cert cert/$(hostname).pem https://umeng-aff300-05-06-cm.rtp.openenglab.netapp.com/api/storage/disks\n
    "},{"location":"prepare-cdot-clusters/#update-harvestyml-to-use-client-certificates","title":"Update Harvest.yml to use client certificates","text":"

    Update the poller section with auth_style: certificate_auth like this:

      u2-cert: \n    auth_style: certificate_auth\n    addr: umeng-aff300-05-06-cm.rtp.openenglab.netapp.com\n
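
    If your key/cert pair is not named after the hostname, you can point to the files explicitly; a hedged sketch, where the ssl_cert and ssl_key paths are assumptions to adjust to wherever you created the files:

      u2-cert:\n    auth_style: certificate_auth\n    addr: umeng-aff300-05-06-cm.rtp.openenglab.netapp.com\n    ssl_cert: /opt/harvest/cert/harvest-client.pem\n    ssl_key: /opt/harvest/cert/harvest-client.key\n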

    Restart your poller and enjoy your password-less lifestyle.

    "},{"location":"prepare-cdot-clusters/#macos","title":"macOS","text":"

    The version of curl installed on macOS up through Monterey is not recent enough to work with self-signed SAN certs. You will need to install a newer version of curl via Homebrew, MacPorts, source, etc.

    Example of failure when running with an older version of curl - you will see this in the client auth test step above.

    curl --version\ncurl 7.64.1 (x86_64-apple-darwin20.0) libcurl/7.64.1 (SecureTransport) LibreSSL/2.8.3 zlib/1.2.11 nghttp2/1.41.0\n\ncurl --cacert umeng-aff300-05-06.crt --key cert/cgrindst-mac-0.key --cert cert/cgrindst-mac-0.pem https://umeng-aff300-05-06-cm.rtp.openenglab.netapp.com/api/storage/disks\n\ncurl: (60) SSL certificate problem: unable to get local issuer certificate\n

    Let's install curl via Homebrew. Make sure you don't miss the message that Homebrew prints about your path.

    If you need to have curl first in your PATH, run:\n  echo 'export PATH=\"/usr/local/opt/curl/bin:$PATH\"' >> /Users/cgrindst/.bash_profile\n

    Now when we make a client auth request with our self-signed certificate, it works! \\o/

    brew install curl\n\ncurl --version\ncurl 7.80.0 (x86_64-apple-darwin20.6.0) libcurl/7.80.0 (SecureTransport) OpenSSL/1.1.1l zlib/1.2.11 brotli/1.0.9 zstd/1.5.0 libidn2/2.3.2 libssh2/1.10.0 nghttp2/1.46.0 librtmp/2.3 OpenLDAP/2.6.0\nRelease-Date: 2021-11-10\nProtocols: dict file ftp ftps gopher gophers http https imap imaps ldap ldaps mqtt pop3 pop3s rtmp rtsp scp sftp smb smbs smtp smtps telnet tftp \nFeatures: alt-svc AsynchDNS brotli GSS-API HSTS HTTP2 HTTPS-proxy IDN IPv6 Kerberos Largefile libz MultiSSL NTLM NTLM_WB SPNEGO SSL TLS-SRP UnixSockets zstd\n\ncurl --cacert umeng-aff300-05-06.crt --key cert/cgrindst-mac-0.key --cert cert/cgrindst-mac-0.pem https://umeng-aff300-05-06-cm.rtp.openenglab.netapp.com/api/storage/disks\n\n{\n  \"records\": [\n    {\n      \"name\": \"1.1.22\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/storage/disks/1.1.22\"\n        }\n      }\n    }\n}\n

    Change directory to your Harvest home directory (replace /opt/harvest/ if this is not the default):

    $ cd /opt/harvest/\n

    Generate an SSL cert and key pair with the following command. Note that it's preferred to generate these files using the hostname of the local machine. The command below assumes debian8 as our hostname and harvest2 as the user we created in the previous step:

    openssl req -x509 -nodes -days 1095 -newkey rsa:2048 -keyout cert/debian8.key \\\n -out cert/debian8.pem  -subj \"/CN=harvest2\"\n
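
    A simple way to grab the public key for the next step is to print it to the terminal and copy it from there:

    cat cert/debian8.pem\n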

    Next, open the public key (debian8.pem in our example) and copy all of its content. Log in to your ONTAP CLI and run this command, replacing CLUSTER with the name of your cluster.

    security certificate install -type client-ca -vserver CLUSTER\n

    Paste the public key content and press Enter. The output should be similar to this:

    jamaica::> security certificate install -type client-ca -vserver jamaica \n\nPlease enter Certificate: Press <Enter> when done\n-----BEGIN CERTIFICATE-----                       \nMIIDETCCAfmgAwIBAgIUP9EUXyl2BDSUOkNEcDU0yqbJ29IwDQYJKoZIhvcNAQEL\nBQAwGDEWMBQGA1UEAwwNaGFydmVzdDItY2xpMzAeFw0yMDEwMDkxMjA0MDhaFw0y\nMzEwMDktcGFueSBMdGQxFzAVBgNVBAMlc3QyLWNsaTMwggEiMA0tcGFueSBGCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVVy25BeCRoGCJWFOlyUL7Ddkze4Hl2/6u\nqye/3mk5vBNsGuXUrtad5XfBB70Ez9hWl5sraLiY68ro6MyX1icjiUTeaYDvS/76\nIw7HeXJ5Pyb/fWth1nePunytoLyG/vaTCySINkIV5nlxC+k0X3wWFJdfJzhloPtt\n1Vdm7aCF2q6a2oZRnUEBGQb6t5KyF0/Xh65mvfgB0pl/AS2HY5Gz+~L54Xyvs+BY\nV7UmTop7WBYl0L3QXLieERpHXnyOXmtwlm1vG5g4n/0DVBNTBXjEdvc6oRh8sxBN\nZlQWRApE7pa/I1bLD7G2AiS4UcPmR4cEpPRVEsOFOaAN3Z3YskvnAgMBAAGjUzBR\nMB0GA1UdDgQWBBQr4syV6TCcgO/5EcU/F8L2YYF15jAfBgNVHSMEGDAWgBQr4syV\n6TCcgO/5EcU/F8L2YYF15jAPBgNVHRMdfdfwerH/MA0GCSqGSIb^ECd3DQEBCwUA\nA4IBAQBjP1BVhClRKkO/M3zlWa2L9Ztce6SuGwSnm6Ebmbs+iMc7o2N9p3RmV6Xl\nh6NcdXRzzPAVrUoK8ewhnBzdghgIPoCI6inAf1CUhcCX2xcnE/osO+CfvKuFnPYE\nWQ7UNLsdfka0a9kTK13r3GMs09z/VsDs0gD8UhPjoeO7LQhdU9tJ/qOaSP3s48pv\nsYzZurHUgKmVOaOE4t9DAdevSECEWCETRETA$Vbn%@@@%%rcdrctru65ryFaByb+\nhTtGhDnoHwzt/cAGvLGV/RyWdGFAbu7Fb1rV94ceggE7nh1FqbdLH9siot6LlnQN\nMhEWp5PYgndOW49dDYUxoauCCkiA\n-----END CERTIFICATE-----\n\n\nYou should keep a copy of the CA-signed digital certificate for future reference.\n\nThe installed certificate's CA and serial number for reference:\nCA: harvest2\nSerial: 3FD1145F2976043012213d3009095534CCRDBD2\n\nThe certificate's generated name for reference: harvest2\n

    Finally, we need to enable SSL authentication with the following command (replace CLUSTER with the name of your cluster):

    security ssl modify -client-enabled true -vserver CLUSTER\n
    "},{"location":"prepare-cdot-clusters/#reference","title":"Reference","text":"
    • https://github.com/jcbsmpsn/golang-https-example
    "},{"location":"prepare-fsx-clusters/","title":"Amazon FSx for ONTAP","text":""},{"location":"prepare-fsx-clusters/#prepare-amazon-fsx-for-ontap","title":"Prepare Amazon FSx for ONTAP","text":"

    To set up Harvest and FSx, make sure you read through Monitoring FSx for ONTAP file systems using Harvest and Grafana.

    "},{"location":"prepare-fsx-clusters/#supported-harvest-dashboards","title":"Supported Harvest Dashboards","text":"

    Amazon FSx for ONTAP exposes a different set of metrics than ONTAP cDOT. That means only a limited set of out-of-the-box dashboards is supported, and some panels may be missing information.

    The dashboards that work with FSx are tagged with fsx and listed below:

    • ONTAP: cDOT
    • ONTAP: Cluster
    • ONTAP: Data Protection Snapshots
    • ONTAP: Datacenter
    • ONTAP: FlexGroup
    • ONTAP: LUN
    • ONTAP: NFS Troubleshooting
    • ONTAP: Quota
    • ONTAP: Security
    • ONTAP: SVM
    • ONTAP: Volume
    • ONTAP: Volume by SVM
    • ONTAP: Volume Deep Dive
    "},{"location":"prepare-storagegrid-clusters/","title":"StorageGRID","text":""},{"location":"prepare-storagegrid-clusters/#prepare-storagegrid-cluster","title":"Prepare StorageGRID cluster","text":"

    NetApp Harvest requires login credentials to access StorageGRID hosts. Although a generic admin account can be used, it is better to create a dedicated monitoring user with the fewest permissions.

    Here's a summary of what we're going to do:

    1. Create a StorageGRID group with the necessary capabilities that Harvest will use to authenticate and collect data
    2. Create a user assigned to the group created in step #1.
    "},{"location":"prepare-storagegrid-clusters/#create-storagegrid-group-permissions","title":"Create StorageGRID group permissions","text":"

    These steps are documented here.

    You will need a root or admin account to create a new group permission.

    1. Select CONFIGURATION > Access control > Admin groups
    2. Select Create group
    3. Select Local group
    4. Enter a display name for the group, which you can update later as required. For example, Harvest or monitoring.
    5. Enter a unique name for the group, which you cannot update later.
    6. Select Continue
    7. On the Manage group permissions screen, select the permissions you want. At a minimum, Harvest requires the Tenant accounts and Metrics query permissions.
    8. Select Save changes

    "},{"location":"prepare-storagegrid-clusters/#create-a-storagegrid-user","title":"Create a StorageGRID user","text":"

    These steps are documented here.

    You will need a root or admin account to create a new user.

    1. Select CONFIGURATION > Access control > Admin users
    2. Select Create user
    3. Enter the user's full name, a unique username, and a password.
    4. Select Continue.
    5. Assign the user to the previously created harvest group.
    6. Select Create user and select Finish.

    "},{"location":"prepare-storagegrid-clusters/#reference","title":"Reference","text":"

    See group permissions for more information on StorageGRID permissions.

    "},{"location":"prometheus-exporter/","title":"Prometheus Exporter","text":"Prometheus Install

    The information below describes how to set up Harvest's Prometheus exporter. If you need help installing or setting up Prometheus, check out their documentation.

    "},{"location":"prometheus-exporter/#overview","title":"Overview","text":"

    The Prometheus exporter is responsible for:

    • formatting metrics into the Prometheus line protocol
    • creating a web-endpoint on http://<ADDR>:<PORT>/metrics (or https: if TLS is enabled) for Prometheus to scrape

    A web end-point is required because Prometheus scrapes Harvest by polling that end-point.

    In addition to the /metrics end-point, the Prometheus exporter also serves an overview of all metrics and collectors available on its root address scheme://<ADDR>:<PORT>/.
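
    For example, assuming one of your pollers exports on port 12990 of the local machine (the actual port depends on your Exporters configuration), you can spot-check both end-points with curl:

    curl -s http://localhost:12990/\ncurl -s http://localhost:12990/metrics | head\n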

    Because Prometheus polls Harvest, don't forget to update your Prometheus configuration and tell Prometheus how to scrape each poller.

    There are two ways to configure the Prometheus exporter: using a port range or individual ports.

    The port range is more flexible and should be used when you want multiple pollers all exporting to the same instance of Prometheus. Both options are explained below.

    "},{"location":"prometheus-exporter/#parameters","title":"Parameters","text":"

    All parameters of the exporter are defined in the Exporters section of harvest.yml.

    An overview of all parameters:

    • port_range (int-int range; overrides port if specified): lower port to upper port (inclusive) of the HTTP end-point to create when a poller specifies this exporter. Starting at the lower port, each free port will be tried sequentially up to the upper port.
    • port (int, required if port_range is not specified): port of the HTTP end-point
    • local_http_addr (string, optional): address of the HTTP server Harvest starts for Prometheus to scrape. Use localhost to serve only on the local machine, or use 0.0.0.0 (default) if Prometheus is scraping from another machine. Default: 0.0.0.0
    • global_prefix (string, optional): add a prefix to all metrics (e.g. netapp_)
    • allow_addrs (list of strings, optional): allow access only if the host matches any of the provided addresses
    • allow_addrs_regex (list of strings, optional): allow access only if the host address matches at least one of the regular expressions
    • cache_max_keep (string, Go duration format, optional): maximum amount of time metrics are cached (in case Prometheus does not collect the metrics in a timely manner). Default: 5m
    • add_meta_tags (bool, optional): add HELP and TYPE metatags to metrics (currently no useful information, but required by some tools). Default: false
    • sort_labels (bool, optional): sort metric labels before exporting. Some open-metrics scrapers report stale metrics when labels are not sorted. Default: false
    • tls (optional): If present, enables TLS transport. If running in a container, see note
    • cert_file, key_file (required children of tls): Relative or absolute path to the TLS certificate and key file. TLS 1.3 certificates required. FIPS compliant P-256 TLS 1.3 certificates can be created with bin/harvest admin tls create server, openssl, mkcert, etc.

    A few examples:

    "},{"location":"prometheus-exporter/#port_range","title":"port_range","text":"
    Exporters:\n  prom-prod:\n    exporter: Prometheus\n    port_range: 2000-2030\nPollers:\n  cluster-01:\n    exporters:\n      - prom-prod\n  cluster-02:\n    exporters:\n      - prom-prod\n  cluster-03:\n    exporters:\n      - prom-prod\n  # ... more\n  cluster-16:\n    exporters:\n      - prom-prod\n

    Sixteen pollers will collect metrics from 16 clusters and make those metrics available to a single instance of Prometheus named prom-prod. Sixteen web end-points will be created on the first 16 available free ports between 2000 and 2030 (inclusive).

    After starting the pollers in the example above, running bin/harvest status shows the following. Note that ports 2000 and 2003 were not available so the next free port in the range was selected. If no free port can be found an error will be logged.

    Datacenter   Poller       PID     PromPort  Status              \n++++++++++++ ++++++++++++ +++++++ +++++++++ ++++++++++++++++++++\nDC-01        cluster-01   2339    2001      running         \nDC-01        cluster-02   2343    2002      running         \nDC-01        cluster-03   2351    2004      running         \n...\nDC-01        cluster-14   2405    2015      running         \nDC-01        cluster-15   2502    2016      running         \nDC-01        cluster-16   2514    2017      running         \n
    "},{"location":"prometheus-exporter/#allow_addrs","title":"allow_addrs","text":"
    Exporters:\n  my_prom:\n    allow_addrs:\n      - 192.168.0.102\n      - 192.168.0.103\n

    will only allow access from exactly these two addresses.

    "},{"location":"prometheus-exporter/#allow_addrs_regex","title":"allow_addrs_regex","text":"
    Exporters:\n  my_prom:\n    allow_addrs_regex:\n      - `^192.168.0.\\d+$`\n

    will only allow access from the IPv4 range 192.168.0.0-192.168.0.255.

    "},{"location":"prometheus-exporter/#configure-prometheus-to-scrape-harvest-pollers","title":"Configure Prometheus to scrape Harvest pollers","text":"

    There are two ways to tell Prometheus how to scrape Harvest: using HTTP service discovery (SD) or listing each poller individually.

    HTTP service discovery is the more flexible of the two. It is also less error-prone and easier to manage. Combined with the port_range configuration described above, SD requires the least effort to configure Prometheus and is the easiest way to keep Harvest and Prometheus in sync.

    NOTE HTTP service discovery does not work with Docker yet. With Docker, you will need to list each poller individually or if possible, use the Docker Compose workflow that uses file service discovery to achieve a similar ease-of-use as HTTP service discovery.

    See the example below for how to use HTTP SD and port_range together.

    "},{"location":"prometheus-exporter/#prometheus-http-service-discovery","title":"Prometheus HTTP Service Discovery","text":"

    HTTP service discovery was introduced in Prometheus version 2.28.0. Make sure you're using that version or later.

    The way service discovery works is:

    • shortly after a poller starts up, it registers with the SD node (if one exists)
    • the poller sends a heartbeat to the SD node, by default every 45s.
    • if a poller fails to send a heartbeat, the SD node removes the poller from the list of active targets after a minute
    • the SD end-point is reachable via SCHEMA://<ADDR>:<PORT>/api/v1/sd

      To use HTTP service discovery you need to:

      1. tell Harvest to start the HTTP service discovery process
      2. tell Prometheus to use the HTTP service discovery endpoint
      "},{"location":"prometheus-exporter/#enable-http-service-discovery-in-harvest","title":"Enable HTTP service discovery in Harvest","text":"

      Add the following to your harvest.yml

      Admin:\n  httpsd:\n    listen: :8887\n

      This tells Harvest to create an HTTP service discovery end-point on interface 0.0.0.0:8887. If you want to only listen on localhost, use 127.0.0.1:<port> instead. See net.Dial for details on the supported listen formats.

      Start the SD process by running bin/harvest admin start. Once it is started, you can curl the end-point for the list of running Harvest pollers.

      curl -s 'http://localhost:8887/api/v1/sd' | jq .\n[\n  {\n    \"targets\": [\n      \"10.0.1.55:12990\",\n      \"10.0.1.55:15037\",\n      \"127.0.0.1:15511\",\n      \"127.0.0.1:15008\",\n      \"127.0.0.1:15191\",\n      \"10.0.1.55:15343\"\n    ]\n  }\n]\n
      "},{"location":"prometheus-exporter/#harvest-http-service-discovery-options","title":"Harvest HTTP Service Discovery options","text":"

      HTTP service discovery (SD) is configured in the Admin > httpsd section of your harvest.yml.

      • listen (required): Interface and port to listen on; use localhost:PORT, or :PORT for all interfaces
      • auth_basic (optional): If present, enables basic authentication on the /api/v1/sd end-point
      • username, password (required children of auth_basic)
      • tls (optional): If present, enables TLS transport. If running in a container, see note
      • cert_file, key_file (required children of tls): Relative or absolute path to the TLS certificate and key file. TLS 1.3 certificates required. FIPS compliant P-256 TLS 1.3 certificates can be created with bin/harvest admin tls create server
      • ssl_cert, ssl_key (optional if auth_style is certificate_auth): Absolute paths to the SSL (client) certificate and key used to authenticate with the target system. If not provided, the poller will look for <hostname>.key and <hostname>.pem in $HARVEST_HOME/cert/. To create certificates for ONTAP systems, see using certificate authentication
      • heart_beat (optional, Go duration format): How frequently each poller sends a heartbeat message to the SD node. Default: 45s
      • expire_after (optional, Go duration format): If a poller fails to send a heartbeat, the SD node removes the poller after this duration. Default: 1m
      "},{"location":"prometheus-exporter/#enable-http-service-discovery-in-prometheus","title":"Enable HTTP service discovery in Prometheus","text":"

      Edit your prometheus.yml and add the following section

      $ vim /etc/prometheus/prometheus.yml

      scrape_configs:\n  - job_name: harvest\n    http_sd_configs:\n      - url: http://localhost:8887/api/v1/sd\n
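
      Optionally, you can validate the edited file with promtool (bundled with Prometheus) before reloading or restarting Prometheus:

      promtool check config /etc/prometheus/prometheus.yml\n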

      Harvest and Prometheus both support basic authentication for HTTP SD end-points. To enable basic auth, add the following to your Harvest config.

      Admin:\n  httpsd:\n    listen: :8887\n    # Basic auth protects GETs and publishes\n    auth_basic:\n      username: admin\n      password: admin\n

      Don't forget to also update your Prometheus config with the matching basic_auth credentials.
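
      Once basic authentication is enabled, you can verify the Harvest SD end-point by passing the same credentials to curl. The example below uses the admin/admin credentials from the snippet above:

      curl -s -u admin:admin 'http://localhost:8887/api/v1/sd' | jq .\n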

      "},{"location":"prometheus-exporter/#prometheus-http-service-discovery-and-port-range","title":"Prometheus HTTP Service Discovery and Port Range","text":"

      HTTP SD combined with Harvest's port_range feature leads to significantly less configuration in your harvest.yml. For example, if your clusters all export to the same Prometheus instance, you can refactor the per-poller exporter into a single exporter shared by all clusters in Defaults as shown below:

      Notice that none of the pollers specify an exporter. Instead, all the pollers share the single exporter named prometheus-r listed in Defaults. prometheus-r is the only exporter defined and as specified will manage up to 1,000 Harvest Prometheus exporters.

      If you add or remove more clusters in the Pollers section, you do not have to change Prometheus since it dynamically pulls the targets from the Harvest admin node.

      Admin:\n  httpsd:\n    listen: :8887\n\nExporters:\n  prometheus-r:\n    exporter: Prometheus\n    port_range: 13000-13999\n\nDefaults:\n  collectors:\n    - Zapi\n    - ZapiPerf\n  use_insecure_tls: false\n  auth_style: password\n  username: admin\n  password: pass\n  exporters:\n    - prometheus-r\n\nPollers:\n  umeng_aff300:\n    datacenter: meg\n    addr: 10.193.48.11\n\n  F2240-127-26:\n    datacenter: meg\n    addr: 10.193.6.61\n\n  # ... add more clusters\n
      "},{"location":"prometheus-exporter/#static-scrape-targets","title":"Static Scrape Targets","text":"

      If we define two Prometheus exporters at ports 12990 and 14567 in the harvest.yml file like so, you need to add two targets to your prometheus.yml too.

      $ vim harvest.yml\n
      Exporters:\n  prometheus1:\n    exporter: Prometheus\n    port: 12990\n  prometheus2:\n    exporter: Prometheus\n    port: 14567\n\nPollers:\n  cluster1:\n    addr: 10.0.1.1\n    username: user\n    password: pass\n    exporters:\n      - prometheus1\n  cluster2:\n      addr: 10.0.1.1\n      username: user\n      password: pass\n      exporters:\n        - prometheus2\n
      $ vim /etc/prometheus/prometheus.yml\n

      Scroll down to near the end of the file and add the following lines:

        - job_name: 'harvest'\n    scrape_interval: 60s\n    static_configs:\n      - targets:\n          - 'localhost:12990'\n          - 'localhost:14567'\n

      NOTE If Prometheus is not on the same machine as Harvest, then replace localhost with the IP address of your Harvest machine. Also note that the scrape interval above is set to 60s (1m), which matches the polling frequency of the default Harvest collectors. If you change the polling frequency of a Harvest collector to a lower value, you should also change the scrape interval.

      "},{"location":"prometheus-exporter/#prometheus-exporter-and-tls","title":"Prometheus Exporter and TLS","text":"

      The Harvest Prometheus exporter can be configured to serve its metrics via HTTPS by configuring the tls section in the Exporters section of harvest.yml.

      Let's walk through an example of how to set up Harvest's Prometheus exporter and how to configure Prometheus to use TLS.

      "},{"location":"prometheus-exporter/#generate-tls-certificates","title":"Generate TLS Certificates","text":"

      We'll use Harvest's admin command line tool to create a self-signed TLS certificate key/pair for the exporter and Prometheus. Note: If running in a container, see note.

      cd $Harvest_Install_Directory\nbin/harvest admin tls create server\n2023/06/23 09:39:48 wrote cert/admin-cert.pem\n2023/06/23 09:39:48 wrote cert/admin-key.pem\n

      Two files are created. Since we want to use these certificates for our Prometheus exporter, let's rename them to make that clearer.

      mv cert/admin-cert.pem cert/prom-cert.pem\nmv cert/admin-key.pem cert/prom-key.pem\n
      "},{"location":"prometheus-exporter/#configure-harvest-prometheus-exporter-to-use-tls","title":"Configure Harvest Prometheus Exporter to use TLS","text":"

      Edit your harvest.yml and add a TLS section to your exporter block like this:

      Exporters:\n  my-exporter:\n    local_http_addr: localhost\n    exporter: Prometheus\n    port: 16001\n    tls:\n      cert_file: cert/prom-cert.pem\n      key_file: cert/prom-key.pem\n

      Update one of your Pollers to use this exporter and start the poller.

      Pollers:\n  my-cluster:\n    datacenter: dc-1\n    addr: 10.193.48.11\n    exporters:\n      - my-exporter     # Use TLS exporter we created above\n

      When the poller is started, it will log whether https or http is being used as part of the url like so:

      bin/harvest start -f my-cluster\n2023-06-23T10:02:03-04:00 INF prometheus/httpd.go:40 > server listen Poller=my-cluster exporter=my-exporter url=https://localhost:16001/metrics\n

      If the URL scheme is https, TLS is being used.

      You can use curl to scrape the Prometheus exporter and verify that TLS is being used like so:

      curl --cacert cert/prom-cert.pem https://localhost:16001/metrics\n\n# or use --insecure to tell curl to skip certificate validation\n# curl --insecure https://localhost:16001/metrics\n
      "},{"location":"prometheus-exporter/#configure-prometheus-to-use-tls","title":"Configure Prometheus to use TLS","text":"

      Let's configure Prometheus to use HTTPs to communicate with the exporter setup above.

      Edit your prometheus.yml and add or adapt your scrape_configs job. You need to add scheme: https and set up a tls_config block that points to the prom-cert.pem created earlier, like so:

      scrape_configs:\n  - job_name: 'harvest-https'\n    scheme: https\n    tls_config:\n      ca_file: /path/to/prom-cert.pem\n    static_configs:\n    - targets:\n        - 'localhost:16001'\n

      Start Prometheus and visit http://localhost:9090/targets with your browser. You should see https://localhost:16001/metrics in the list of targets.

      "},{"location":"prometheus-exporter/#prometheus-alerts","title":"Prometheus Alerts","text":"

      Prometheus includes out-of-the-box support for simple alerting. Alert rules are configured in your prometheus.yml file. Setup and details can be found in the Prometheus guide on alerting.

      Harvest also includes EMS and sample alerts for reference. Refer to the EMS Collector for more details about EMS events. Refer to the EMS alert runbook for descriptions and remediation steps.

      "},{"location":"prometheus-exporter/#alertmanager","title":"Alertmanager","text":"

      Prometheus's built-in alerts are good for simple workflows. They do a nice job of telling you what's happening at the moment. If you need a richer solution that includes summarization, notification, advanced delivery, deduplication, etc., check out Alertmanager.

      "},{"location":"prometheus-exporter/#reference","title":"Reference","text":"
      • Prometheus Alerting
      • Alertmanager
      • Alertmanager's notification metrics
      • Prometheus Linter
      • Collection of example Prometheus Alerts
      "},{"location":"quickstart/","title":"Quickstart","text":"

      Welcome to the NetApp Harvest Getting Started Guide. This tutorial will guide you through the steps required to deploy an instance of NetApp Harvest, Prometheus, and Grafana on a Linux platform to monitor an ONTAP cluster.

      This tutorial uses systemd to manage Harvest, Prometheus, and Grafana. If you would rather run the processes directly, feel free to ignore the sections of the tutorial that set up systemd service files.

      "},{"location":"quickstart/#1-set-installation-path","title":"1. Set Installation Path","text":"

      First, set the installation path as an environment variable. For example, we'll use /opt/netapp/harvest.

      HARVEST_INSTALL_PATH=/opt/netapp/harvest\nmkdir -p ${HARVEST_INSTALL_PATH}\n
      "},{"location":"quickstart/#2-install-harvest","title":"2. Install Harvest","text":"

      Harvest is distributed as a container, a native tarball, and RPM and Deb packages. Pick the one that works best for you. More details can be found in the installation documentation.

      For this guide, we'll use the tarball package as an example.

      Visit the releases page and take note of the latest release. Update the HARVEST_VERSION environment variable with the latest release in the script below. For example, to download the 24.05.2 release you would use HARVEST_VERSION=24.05.2

      After updating the HARVEST_VERSION environment variable run the bash script to download Harvest and untar it into your HARVEST_INSTALL_PATH directory.

      HARVEST_VERSION=24.05.2\ncd ${HARVEST_INSTALL_PATH}\nwget https://github.com/NetApp/harvest/releases/download/v${HARVEST_VERSION}/harvest-${HARVEST_VERSION}-1_linux_amd64.tar.gz\ntar -xvf harvest-${HARVEST_VERSION}-1_linux_amd64.tar.gz\n
      "},{"location":"quickstart/#3-install-prometheus","title":"3. Install Prometheus","text":"

      To install Prometheus, follow these steps. For more details see Prometheus installation.

      PROMETHEUS_VERSION=2.49.1\ncd ${HARVEST_INSTALL_PATH}\nwget https://github.com/prometheus/prometheus/releases/download/v${PROMETHEUS_VERSION}/prometheus-${PROMETHEUS_VERSION}.linux-amd64.tar.gz\ntar -xvf prometheus-${PROMETHEUS_VERSION}.linux-amd64.tar.gz\nmv prometheus-${PROMETHEUS_VERSION}.linux-amd64 prometheus-${PROMETHEUS_VERSION}\n
      If you want to manage Prometheus with systemd, you can create a service file for Prometheus like so. This step is optional. A service file will attempt to restart Prometheus automatically when the machine is restarted.

      Create a service file for Prometheus:

      cat << EOF | sudo tee /etc/systemd/system/prometheus.service\n[Unit]\nDescription=Prometheus Server\nDocumentation=https://prometheus.io/docs/introduction/overview/\nAfter=network-online.target\n\n[Service]\nUser=root\nRestart=on-failure\nExecStart=${HARVEST_INSTALL_PATH}/prometheus-${PROMETHEUS_VERSION}/prometheus --config.file=${HARVEST_INSTALL_PATH}/prometheus-${PROMETHEUS_VERSION}/prometheus.yml\n\n[Install]\nWantedBy=multi-user.target\nEOF\n

      Reload the systemd configuration and start Prometheus:

      sudo systemctl daemon-reload\nsudo systemctl enable prometheus\nsudo systemctl start prometheus\n

      Check if Prometheus is up and running:

      sudo systemctl status prometheus\n

      You should see output indicating that the Prometheus service is active and running.

      Alternative: Start Prometheus Directly If you would rather start Prometheus directly and kick the tires before creating a service file, you can run the following command to start Prometheus in the background:
      nohup ${HARVEST_INSTALL_PATH}/prometheus-${PROMETHEUS_VERSION}/prometheus --config.file=${HARVEST_INSTALL_PATH}/prometheus-${PROMETHEUS_VERSION}/prometheus.yml > prometheus.log 2>&1 &\n
      This command uses nohup to run Prometheus in the background and redirects the output to prometheus.log."},{"location":"quickstart/#4-install-grafana","title":"4. Install Grafana","text":"

      To install Grafana, follow these steps:

      GRAFANA_VERSION=10.4.5\ncd ${HARVEST_INSTALL_PATH}\nwget https://dl.grafana.com/oss/release/grafana-${GRAFANA_VERSION}.linux-amd64.tar.gz\ntar -xvf grafana-${GRAFANA_VERSION}.linux-amd64.tar.gz\n

      If you want to manage Grafana with systemd, you can create a service file for Grafana like so. This step is optional. A service file will attempt to restart Grafana automatically when the machine is restarted.

      Create a service file for Grafana:

      cat << EOF | sudo tee /etc/systemd/system/grafana.service\n[Unit]\nDescription=Grafana Server\nDocumentation=https://grafana.com/docs/grafana/latest/setup-grafana/installation/\nAfter=network-online.target\n\n[Service]\nUser=root\nRestart=on-failure\nExecStart=${HARVEST_INSTALL_PATH}/grafana-v${GRAFANA_VERSION}/bin/grafana-server --config=${HARVEST_INSTALL_PATH}/grafana-v${GRAFANA_VERSION}/conf/defaults.ini --homepath=${HARVEST_INSTALL_PATH}/grafana-v${GRAFANA_VERSION}\n\n[Install]\nWantedBy=multi-user.target\nEOF\n

      Reload the systemd configuration and start Grafana:

      sudo systemctl daemon-reload\nsudo systemctl enable grafana\nsudo systemctl start grafana\n

      Check if Grafana is up and running:

      sudo systemctl status grafana\n

      You should see output indicating that the Grafana service is active and running.

      Alternative: Start Grafana Directly If you would rather start Grafana directly and kick the tires before creating a service file, you can run the following command to start Grafana in the background:
      nohup ${HARVEST_INSTALL_PATH}/grafana-v${GRAFANA_VERSION}/bin/grafana-server --config=${HARVEST_INSTALL_PATH}/grafana-v${GRAFANA_VERSION}/conf/defaults.ini --homepath=${HARVEST_INSTALL_PATH}/grafana-v${GRAFANA_VERSION} > grafana.log 2>&1 &\n
      This command uses nohup to run Grafana in the background and redirects the output to grafana.log."},{"location":"quickstart/#5-configuration-file","title":"5. Configuration File","text":"

      By default, Harvest loads its configuration information from the ./harvest.yml file. If you would rather use a different file, use the --config command line argument flag to specify the path to your config file.

      To start collecting metrics, you need to define at least one poller and one exporter in your configuration file. The default configuration shipped with Harvest includes an example poller; it is useful if you want to monitor resource usage by Harvest itself and serves as a good example. Feel free to delete it if you don't need it.

      The next step is to add pollers for your ONTAP clusters in the Pollers section of the Harvest configuration file, harvest.yml.

      Edit the Harvest configuration file:

      cd ${HARVEST_INSTALL_PATH}/harvest-${HARVEST_VERSION}-1_linux_amd64\nvi harvest.yml\n

      Copy and paste the following YAML configuration into your editor and update the $cluster-management-ip, $username, and $password sections to match your ONTAP system.

      Exporters:\n  prometheus1:\n    exporter: Prometheus\n    port_range: 13000-13100\n\nDefaults:\n  collectors:\n    - Zapi\n    - ZapiPerf\n    - Ems\n    - Rest\n    - RestPerf\n  use_insecure_tls: true\n\nPollers:\n  jamaica:\n    datacenter: DC-01\n    addr: $cluster-management-ip\n    auth_style: basic_auth\n    username: $username\n    password: $password\n    exporters:\n      - prometheus1\n

      Note: The ONTAP user specified in this configuration must have the appropriate permissions as outlined in the Prepare cDot Clusters documentation.
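
      Optionally, you can sanity-check the edited configuration with Harvest's doctor command before starting any pollers (run from the Harvest install directory):

      bin/harvest doctor --config harvest.yml\n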

      "},{"location":"quickstart/#6-edit-prometheus-config-file","title":"6. Edit Prometheus Config File","text":"

      Edit the Prometheus configuration file:

      cd ${HARVEST_INSTALL_PATH}/prometheus-${PROMETHEUS_VERSION}\nvi prometheus.yml\n

      Add the following under the scrape_configs section. The targets you are adding should match the range of ports you specified in your harvest.yml file (in the example above, we use the port_range 13000-13100).

        - job_name: 'harvest'\n    static_configs:\n      - targets: ['localhost:13000', 'localhost:13001', 'localhost:13002']  # Add ports as defined in the port range\n

      For example, if your port range in the Harvest configuration is 13000-13100, you should add the ports within this range that you plan to use.

      Restart Prometheus to apply the changes:

      sudo systemctl restart prometheus\n

      Check if Prometheus is up and running:

      sudo systemctl status prometheus\n
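
      You can also confirm that Prometheus picked up the Harvest targets by querying its HTTP API. The example below assumes Prometheus is listening on its default port 9090 and that jq is installed; until the Harvest pollers are started in the next step, these targets will likely report as down:

      curl -s http://localhost:9090/api/v1/targets | jq '.data.activeTargets[] | {scrapeUrl, health}'\n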
      "},{"location":"quickstart/#7-start-harvest","title":"7. Start Harvest","text":"

      To start the Harvest pollers, follow these steps. For more details see Harvest service.

      Create a systemd service file for Harvest pollers:

      cat << EOF | sudo tee /etc/systemd/system/poller@.service\n[Unit]\nDescription=\"NetApp Harvest Poller instance %I\"\nPartOf=harvest.target\nAfter=network-online.target\nWants=network-online.target\n\n[Service]\nUser=harvest\nGroup=harvest\nType=simple\nRestart=on-failure\nExecStart=${HARVEST_INSTALL_PATH}/harvest-${HARVEST_VERSION}-1_linux_amd64/bin/harvest --config ${HARVEST_INSTALL_PATH}/harvest-${HARVEST_VERSION}-1_linux_amd64/harvest.yml start -f %i\n\n[Install]\nWantedBy=harvest.target\nEOF\n

      Create a target file for Harvest:

      cd ${HARVEST_INSTALL_PATH}/harvest-${HARVEST_VERSION}-1_linux_amd64\nbin/harvest generate systemd | sudo tee /etc/systemd/system/harvest.target\n

      Reload the systemd configuration and start Harvest:

      sudo systemctl daemon-reload\nsudo systemctl enable harvest.target\nsudo systemctl start harvest.target\n

      Verify that the pollers have started successfully by checking their status:

      systemctl status \"poller*\"\n
      Alternative: Start Harvest Directly If you would rather start Harvest directly and kick the tires before creating a service file, you can run the following command to start Harvest:
      cd ${HARVEST_INSTALL_PATH}/harvest-${HARVEST_VERSION}-1_linux_amd64\nbin/harvest start\n
      Verify that the pollers have started successfully by checking their status:
      bin/harvest status\n
      The output should look similar to this:
      Datacenter | Poller  |   PID   | PromPort | Status\n-----------+---------+---------+----------+----------\nDC-01      | jamaica | 1280145 |    13000 | running\n

      The logs of each poller can be found in /var/log/harvest/.
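
      You can also scrape a poller directly to confirm that metrics are being exported. The port below matches the PromPort shown in the status output above; adjust it to your environment:

      curl -s http://localhost:13000/metrics | head\n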

      "},{"location":"quickstart/#8-add-prometheus-datasource-in-grafana","title":"8. Add Prometheus Datasource in Grafana","text":"

      To add a Prometheus datasource in Grafana, follow these steps:

      1. Open your web browser and navigate to Grafana (http://localhost:3000). When prompted for credentials, use Grafana defaults admin/admin. You should change the default credentials once you log in.
      2. Navigate to the data sources section by visiting http://localhost:3000/connections/datasources or by clicking the hamburger menu (three horizontal lines) at the top-left of the page and navigate to Connections and then Data Sources.
      3. Click on Add data source.
      4. Select Prometheus from the list of available data sources.
      5. In the Prometheus server URL field, enter http://localhost:9090.
      6. Click on Save and test.
      7. At the bottom of the page, you should see the message 'Successfully queried the Prometheus API.' For detailed instructions, please refer to the configure Prometheus Data Source documentation.
      "},{"location":"quickstart/#9-generate-grafana-api-token","title":"9. Generate Grafana API Token","text":"

      To import Grafana dashboards using the bin/harvest grafana import command, you need a Grafana API token. Follow these steps to generate it:

      1. Open your web browser and navigate to Grafana (http://localhost:3000). Enter your Grafana credentials to log in. The default username and password are admin.
      2. Click the hamburger menu (three horizontal lines) at the top-left of the page, navigate to Administration -> Users and access, and then select Service Account.
      3. Click on Add Service Account.
      4. Enter the display name Harvest.
      5. Set the role to Editor.
      6. Click on Create. The service account will appear in the dashboard.
      7. Navigate back to Service Account.
      8. Click on Add service account token for the Harvest service account.
      9. Click on Generate Token.
      10. Click on Copy to clipboard and close.

      IMPORTANT: This is the only opportunity to save the token. Immediately paste it into a text file and save it. The token will be needed by Harvest later on.

      For detailed instructions, please refer to the Grafana API Keys documentation.

      "},{"location":"quickstart/#10-import-grafana-dashboards","title":"10. Import Grafana Dashboards","text":"

      To import Grafana dashboards, use the following command:

      cd ${HARVEST_INSTALL_PATH}/harvest-${HARVEST_VERSION}-1_linux_amd64\nbin/harvest grafana import --token YOUR_TOKEN_HERE\n

      Replace YOUR_TOKEN_HERE with the token obtained in step 9.

      You will be prompted to save your API key (token) for later use. Press n to not save the token in your harvest.yml file.

      After a few seconds, all the dashboards will be imported into Grafana.

      "},{"location":"quickstart/#9-verify-dashboards-in-grafana","title":"9. Verify Dashboards in Grafana","text":"

      After adding the Prometheus datasource, you can verify that your dashboards are correctly displaying data. Follow these steps:

      1. Open your web browser and navigate to Grafana (http://localhost:3000). Enter your Grafana credentials to log in. The default username and password are admin.
      2. Click on the \"three lines\" button (also known as the hamburger menu) in the top left corner of the Grafana interface. From the menu, select Dashboards.
      3. Open the Volume dashboard. Once the dashboard opens, you should see volume data displayed.
      "},{"location":"quickstart/#troubleshooting","title":"Troubleshooting","text":"

      If you encounter issues, check the logs in /var/log/harvest and refer to the troubleshooting section on the wiki. You can also reach out for help on Discord or via email at ng-harvest-files@netapp.com.

      "},{"location":"quickstart/#conclusion","title":"Conclusion","text":"

      🎊 Congratulations! You have successfully set up NetApp Harvest, Prometheus, and Grafana. Enjoy monitoring your systems and feel free to reach out on Discord, GitHub, or email.

      "},{"location":"release-notes/","title":"Release Notes","text":"
      • Changelog
      • Releases
      "},{"location":"system-requirements/","title":"System Requirements","text":"

      Harvest is written in Go, which means it runs on recent Linux systems. It also runs on Macs for development.

      Hardware requirements depend on how many clusters you monitor and the number of metrics you choose to collect. With the default configuration, when monitoring 10 clusters, we recommend:

      • CPU: 2 cores
      • Memory: 1 GB
      • Disk: 500 MB (mostly used by log files)

      Note: These CPU, memory, and disk requirements are just for Harvest and do not include Prometheus, InfluxDB, or Grafana.

      Harvest is compatible with:

      • Prometheus: 2.33 or higher
      • InfluxDB: v2
      • Grafana: 8.1.X or higher
      • Docker: 20.10.0 or higher and compatible Docker Compose
      "},{"location":"architecture/rest-collector/","title":"REST collector","text":""},{"location":"architecture/rest-collector/#status","title":"Status","text":"

      ~~Accepted~~ Superseded by REST strategy

      The exact version of ONTAP that has full ZAPI parity is subject to change. Everywhere you see version 9.12, it may become 9.13 or later.

      "},{"location":"architecture/rest-collector/#context","title":"Context","text":"

      We need to document and communicate to customers:

      • when they should switch from the ZAPI collectors to the REST ones
      • what versions of ONTAP are supported by Harvest's REST collectors
      • how to fill ONTAP gaps between the ZAPI and REST APIs

      The ONTAP version information is important because gaps are addressed in later versions of cDOT.

      "},{"location":"architecture/rest-collector/#considered-options","title":"Considered Options","text":"
      1. Only REST: A clean cut-over; stop using ZAPI and switch completely to REST.

      2. Both: Support both ZAPI and REST collectors running at the same time, collecting the same objects. Flexible, but has the downside of last-write wins. Not recommended unless you selectively pick non-overlapping sets of objects.

      3. Template change that supports both: Change the template to break ties, priority, etc. Rejected because the additional complexity is not worth the benefits.

      4. private-cli: When there are REST gaps that have not been filled yet or will never be filled (WONTFIX), the Harvest REST collector will provide infrastructure and documentation on how to use private-cli pass-through to address gaps.

      "},{"location":"architecture/rest-collector/#chosen-decision","title":"Chosen Decision","text":"

      For clusters with ONTAP versions < 9.12, we recommend customers use the ZAPI collectors. (#2) (#4)

      Once ONTAP 9.12+ is released and customers have upgraded to it, they should make a clean cut-over to the REST collectors (#1). ONTAP 9.12 is the version of ONTAP that has the best parity with what Harvest collects in terms of config and performance counters. Harvest REST collectors, templates, and dashboards are validated against ONTAP 9.12+. Most of the REST config templates will work before 9.12, but unless you have specific needs, we recommend sticking with the ZAPI collectors until you upgrade to 9.12.

      There is little value in running both the ZAPI and REST collectors for an overlapping set of objects. It's unlikely you want to collect the same object via REST and ZAPI at the same time. Harvest doesn't support this use-case, but does nothing to detect or prevent it.

      If you want to collect a non-overlapping set of objects with REST and ZAPI, you can. If you do, we recommend you disable the ZAPI object collector. For example, if you enable the REST disk template, you should disable the ZAPI disk template. We do NOT recommend collecting an overlapping set of objects with both collectors since the last one to run will overwrite previously collected data.

      Harvest will document how to use the REST private cli pass-through to collect custom and non-public counters.

      The Harvest team recommends that customers open ONTAP issues for REST public API gaps that need to be filled.

      "},{"location":"architecture/rest-collector/#consequences","title":"Consequences","text":"

      The Harvest REST collectors will work with limitations on earlier versions of ONTAP. ONTAP 9.12+ is the minimally validated version. We only validate the full set of templates, dashboards, counters, etc. on versions of ONTAP 9.12+.

      Harvest does not prevent you from collecting the same resource with ZAPI and REST.

      "},{"location":"architecture/rest-strategy/","title":"REST Strategy","text":""},{"location":"architecture/rest-strategy/#status","title":"Status","text":"

      Accepted

      "},{"location":"architecture/rest-strategy/#context","title":"Context","text":"

      ONTAP has published a customer product communiqué (CPC-00410) announcing that ZAPIs will reach end of availability (EOA) in ONTAP 9.13.1, released Q2 2023.

      This document describes how Harvest handles the ONTAP transition from ZAPI to REST. In most cases, no action is required on your part.

      "},{"location":"architecture/rest-strategy/#harvest-api-transition","title":"Harvest API Transition","text":"

      Harvest tries to use the protocol you specify in your harvest.yml config file.

      When specifying the ZAPI collector, Harvest will use the ZAPI protocol unless the cluster no longer speaks Zapi, in which case, Harvest will switch to REST.

      If you specify the REST collector, Harvest will use the REST protocol.

      Harvest includes a full set of REST templates that export identical metrics as the included ZAPI templates. No changes to dashboards or downstream metric-consumers should be required. See below if you have added metrics to the Harvest out-of-the-box templates.

      Read on if you want to know how you can use REST sooner, or you want to take advantage of REST-only features in ONTAP.

      "},{"location":"architecture/rest-strategy/#frequently-asked-questions","title":"Frequently Asked Questions","text":""},{"location":"architecture/rest-strategy/#how-does-harvest-decide-whether-to-use-rest-or-zapi-apis","title":"How does Harvest decide whether to use REST or ZAPI APIs?","text":"

      Harvest attempts to use the collector defined in your harvest.yml config file.

      • If you specify the ZAPI collector, Harvest will use the ZAPI protocol as long as the cluster still speaks Zapi. If the cluster no longer understands Zapi, Harvest will switch to Rest.

      • If you specify the REST collector, Harvest will use REST.

      Earlier versions of Harvest included a prefer_zapi poller option and a HARVEST_NO_COLLECTOR_UPGRADE environment variable. Both of these options are ignored in Harvest versions 23.08 onwards.

      "},{"location":"architecture/rest-strategy/#why-would-i-switch-to-rest-before-9131","title":"Why would I switch to REST before 9.13.1?","text":"
      • You have advanced use cases to validate before ONTAP removes ZAPIs
      • You want to take advantage of new ONTAP features that are only available via REST (e.g., cloud features, event remediation, name services, cluster peers, etc.)
      • You want to collect a metric that is not available via ZAPI
      • You want to collect a metric from the ONTAP CLI. The REST API includes a private CLI pass-through to access any ONTAP CLI command
      "},{"location":"architecture/rest-strategy/#can-i-start-using-rest-before-9131","title":"Can I start using REST before 9.13.1?","text":"

      Yes. Many customers do. Be aware of the following limitations:

      1. ONTAP includes a subset of performance counters via REST beginning in ONTAP 9.11.1.
      2. There may be performance metrics missing from versions of ONTAP earlier than 9.11.1.

      Where performance metrics are concerned, because of point #2, our recommendation is to wait until at least ONTAP 9.12.1 before switching to the RestPerf collector. You can continue using the ZapiPerf collector until you switch.

      "},{"location":"architecture/rest-strategy/#a-counter-is-missing-from-rest-what-do-i-do","title":"A counter is missing from REST. What do I do?","text":"

      The Harvest team has ensured that all the out-of-the-box ZAPI templates have matching REST templates with identical metrics as of Harvest 22.11 and ONTAP 9.12.1. Any additional ZAPI Perf counters you have added may be missing from ONTAP REST Perf.

      Join the Harvest Discord channel and ask us about the counter. Sometimes we may know which release the missing counter is coming in; otherwise, we can point you to the ONTAP process to request new counters.

      "},{"location":"architecture/rest-strategy/#can-i-use-the-rest-and-zapi-collectors-at-the-same-time","title":"Can I use the REST and ZAPI collectors at the same time?","text":"

      Yes. Harvest ensures that duplicate resources are not collected from both collectors.

      When there is potential duplication, Harvest first resolves the conflict in the order collectors are defined in your poller and then negotiates with the cluster on the most appropriate API to use per above.

      Let's take a look at a few examples using the following poller definition:

      cluster-1:\n    datacenter: dc-1\n    addr: 10.1.1.1\n    collectors:\n        - Zapi\n        - Rest\n
      • When cluster-1 is running ONTAP 9.9.X (ONTAP still supports ZAPIs), the Zapi collector will be used since it is listed first in the list of collectors. When collecting a REST-only resource like nfs_client, the Rest collector will be used since nfs_client objects are only available via REST.

      • When cluster-1 is running ONTAP 9.18.1 (ONTAP no longer supports ZAPIs), the Rest collector will be used since ONTAP can no longer speak the ZAPI protocol.

      If you want the REST collector to be used in all cases, change the order in the collectors section so Rest comes before Zapi.

      If the resource does not exist for the first collector, the next collector will be tried. Using the example above, when collecting VolumeAnalytics resources, the Zapi collector will not run for VolumeAnalytics objects since that resource is only available via REST. The Rest collector will run and collect the VolumeAnalytics objects.

      "},{"location":"architecture/rest-strategy/#ive-added-counters-to-existing-zapi-templates-will-those-counters-work-in-rest","title":"I've added counters to existing ZAPI templates. Will those counters work in REST?","text":"

      ZAPI config metrics often have a REST equivalent that can be found in ONTAP's ONTAPI to REST mapping document.

      ZAPI performance metrics may be missing in REST. If you have added new metrics or templates to the ZapiPerf collector, those metrics likely aren't available via REST. You can check if the performance counter is available, ask the Harvest team on Discord, or ask ONTAP to add the counter you need.

      "},{"location":"architecture/rest-strategy/#reference","title":"Reference","text":"

      Table of ONTAP versions, dates and API notes.

      • 9.11.1 (Q2 2022): First version of ONTAP with REST performance metrics
      • 9.12.1 (Q4 2022): ZAPIs still supported; REST performance metrics have parity with the ZAPI performance metrics collected by Harvest 22.11
      • 9.13.1: ZAPIs still supported
      • 9.14.1-9.15.1: ZAPIs enabled if an ONTAP upgrade detects they were being used earlier. New ONTAP installs default to REST only. ZAPIs may be enabled via CLI
      • 9.16.1-9.17.1: ZAPIs disabled. See the ONTAP communiqué for details on re-enabling
      • 9.18.1: ZAPIs removed. No way to re-enable
      "},{"location":"help/config-collection/","title":"Harvest Config Collection Guide","text":"

      This guide is designed to help you validate your Harvest configuration (harvest.yml) on various platforms. The commands in this guide will generate redacted output with personally identifiable information (PII) removed. This makes it safe for you to share the output. Follow the instructions specific to your platform. If you wish to share it with the Harvest team, please email them at ng-harvest-files@netapp.com.

      "},{"location":"help/config-collection/#rpm-deb-and-native-installations","title":"RPM, DEB, and Native Installations","text":"

      To print a redacted version of your Harvest configuration to the console, use the following command:

      cd /opt/harvest\nexport CONFIG_FILE_NAME=harvest.yml\nbin/harvest doctor --print --config $CONFIG_FILE_NAME\n
      "},{"location":"help/config-collection/#docker-container","title":"Docker Container","text":"

      For Docker containers, use the following command to print a redacted version of your Harvest configuration to the console:

      cd to/where/your/harvest.yml/is\nexport CONFIG_FILE_NAME=harvest.yml\ndocker run --rm --entrypoint \"bin/harvest\" --volume \"$(pwd)/$CONFIG_FILE_NAME:/opt/harvest/harvest.yml\" ghcr.io/netapp/harvest doctor --print\n
      "},{"location":"help/config-collection/#nabox","title":"NABox","text":"

      If you're using NABox, you'll need to ssh into your NABox instance. Then, use the following command to print a redacted version of your Harvest configuration to the console:

      dc exec -w /conf nabox-harvest2 /netapp-harvest/bin/harvest doctor --print\n

      If your configuration file name is different from the default harvest.yml, remember to change the CONFIG_FILE_NAME environment variable to match your file name.

      "},{"location":"help/faq/","title":"FAQ","text":""},{"location":"help/faq/#how-do-i-migrate-from-harvest-16-to-20","title":"How do I migrate from Harvest 1.6 to 2.0?","text":"

      There is currently no tool to migrate data from Harvest 1.6 to 2.0. The most common workaround is to run both 1.6 and 2.0 in parallel until the 1.6 data expires due to the normal retention policy, and then fully cut over to 2.0.

      Technically, it's possible to take a Graphite DB, extract the data, and send it to a Prometheus DB, but it's not an area we've invested in. If you want to explore that option, check out promtool, which supports importing, but it's probably not worth the effort.
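
      If you do experiment with that route, promtool can backfill OpenMetrics-formatted samples into Prometheus TSDB blocks. The sketch below assumes you have already exported your Graphite data into an OpenMetrics text file named data.om and that ./data is your Prometheus data directory:

      promtool tsdb create-blocks-from openmetrics data.om ./data\n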

      "},{"location":"help/faq/#how-do-i-share-sensitive-log-files-with-netapp","title":"How do I share sensitive log files with NetApp?","text":"

      Email them to ng-harvest-files@netapp.com. This mail address is accessible to NetApp Harvest employees only.

      "},{"location":"help/faq/#multi-tenancy","title":"Multi-tenancy","text":""},{"location":"help/faq/#question","title":"Question","text":"

      Is there a way to allow per SVM level user views? I need to offer 1 tenant per SVM. Can I limit visibility to specific SVMs? Is there an SVM dashboard available?

      "},{"location":"help/faq/#answer","title":"Answer","text":"

      You can do this with Grafana. Harvest can provide the labels for SVMs. The pieces are there but need to be put together.

      Grafana templates support the $__user variable to make pre-selections and decisions. You can use that + metadata mapping the user <-> SVM. With both of those you can build SVM specific dashboards.

      There is a German service provider who is doing this. They have service managers responsible for a set of customers, and they only want to see the data/dashboards of their corresponding customers.

      "},{"location":"help/faq/#harvest-authentication-and-permissions","title":"Harvest Authentication and Permissions","text":""},{"location":"help/faq/#question_1","title":"Question","text":"

      What permissions does Harvest need to talk to ONTAP?

      "},{"location":"help/faq/#answer_1","title":"Answer","text":"

      Permissions, authentication, role based security, and creating a Harvest user are covered here.

      "},{"location":"help/faq/#ontap-counters-are-missing","title":"ONTAP counters are missing","text":""},{"location":"help/faq/#question_2","title":"Question","text":"

      How do I make Harvest collect additional ONTAP counters?

      "},{"location":"help/faq/#answer_2","title":"Answer","text":"

      Instead of modifying the out-of-the-box templates in the conf/ directory, it is better to create your own custom templates following these instructions.

      "},{"location":"help/faq/#capacity-metrics","title":"Capacity Metrics","text":""},{"location":"help/faq/#question_3","title":"Question","text":"

      How are capacity and other metrics calculated by Harvest?

      "},{"location":"help/faq/#answer_3","title":"Answer","text":"

      Each collector has its own way of collecting and post-processing metrics. Check the documentation of each individual collector (usually under section #Metrics). Capacity and hardware-related metrics are collected by the Zapi collector, which emits metrics as-is without any additional calculation. Performance metrics are collected by the ZapiPerf collector, and the final values are calculated from the delta of two consecutive polls.

      "},{"location":"help/faq/#tagging-volumes","title":"Tagging Volumes","text":""},{"location":"help/faq/#question_4","title":"Question","text":"

      How do I tag ONTAP volumes with metadata and surface that data in Harvest?

      "},{"location":"help/faq/#answer_4","title":"Answer","text":"

      See volume tagging issue and volume tagging via sub-templates

      "},{"location":"help/faq/#rest-and-zapi-documentation","title":"REST and Zapi Documentation","text":""},{"location":"help/faq/#question_5","title":"Question","text":"

      How do I relate ONTAP REST endpoints to ZAPI APIs and attributes?

      "},{"location":"help/faq/#answer_5","title":"Answer","text":"

      Please refer to the ONTAPI to REST API mapping document.

      "},{"location":"help/faq/#sizing","title":"Sizing","text":"

      How much disk space is required by Prometheus?

      This depends on the collectors you've added, # of nodes monitored, cardinality of labels, # instances, retention, ingest rate, etc. A good approximation is to curl your Harvest exporter and count the number of samples that it publishes and then feed that information into a Prometheus sizing formula.

      Prometheus stores an average of 1-2 bytes per sample. To plan the capacity of a Prometheus server, you can use the rough formula: needed_disk_space = retention_time_seconds * ingested_samples_per_second * bytes_per_sample
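
      As a hypothetical worked example (illustrative numbers, not measurements): with 15 days of retention, roughly 10,000 ingested samples per second, and 2 bytes per sample, the formula works out to about 26 GB. The arithmetic can be reproduced in a shell:

      echo $(( 15 * 24 * 60 * 60 * 10000 * 2 ))   # 25920000000 bytes, roughly 26 GB\n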

      A rough approximation is outlined at https://devops.stackexchange.com/questions/9298/how-to-calculate-disk-space-required-by-prometheus-v2-2

      "},{"location":"help/faq/#topk-usage-in-grafana","title":"Topk usage in Grafana","text":""},{"location":"help/faq/#question_6","title":"Question","text":"

      In Grafana, why do I see more results from topk than I asked for?

      "},{"location":"help/faq/#answer_6","title":"Answer","text":"

      Topk is one of Prometheus's out-of-the-box aggregation operators, and is used to calculate the largest k elements by sample value.

Depending on the time range you select, Prometheus will often return more results than you asked for. That's because Prometheus picks the topk at each point in time on the graph. In other words, different time series are the topk at different times in the graph. When you use a large duration, there are often many time series.

      This is a limitation of Prometheus and can be mitigated by:

• reducing the time range to a smaller duration that includes fewer topk results; something like a five to ten minute range works well for most of Harvest's charts
• using the panel's table, which shows the current topk rows; that data can be used to supplement the additional series shown in the charts

      Additional details: here, here, and here

      "},{"location":"help/faq/#where-are-harvest-container-images-published","title":"Where are Harvest container images published?","text":"

      Harvest container images are published to both GitHub's image registry (ghcr.io) and Docker's image registry (hub.docker.com). By default, ghcr.io is used for pulling images.

      Please note that cr.netapp.io is no longer being maintained. If you have been using cr.netapp.io to pull Harvest images, we encourage you to switch to ghcr.io or Docker Hub as your container image registry. Starting in 2024, we will cease publishing Harvest container images to cr.netapp.io.

      "},{"location":"help/faq/#how-do-i-switch-between-image-registries","title":"How do I switch between image registries?","text":""},{"location":"help/faq/#answer_7","title":"Answer","text":"

      Replace all instances of rahulguptajss/harvest:latest with ghcr.io/netapp/harvest:latest:

      • Edit your docker-compose file and make those replacements or regenerate the compose file.

      • Update any shell or Ansible scripts you have that are also using those images

      • After making these changes, you should stop your containers, pull new images, and restart.
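
For example, a one-liner that rewrites the image reference in an existing compose file (a sketch; the file name is an assumption, and GNU sed syntax is shown):

sed -i 's|rahulguptajss/harvest:latest|ghcr.io/netapp/harvest:latest|g' harvest-compose.yml\n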

      You can verify that you're using the GitHub Container Registry images like so:

      Before

      docker image ls -a\nREPOSITORY                  TAG       IMAGE ID       CREATED        SIZE\nrahulguptajss/harvest       latest    80061bbe1c2c   10 days ago    56.4MB <=== Docker Hub\nprom/prometheus             v2.33.1   e528f02c45a6   3 weeks ago    204MB\ngrafana/grafana             8.3.4     4a34578e4374   5 weeks ago    274MB\n

      Pull image from GitHub Container Registry

      docker pull ghcr.io/netapp/harvest:latest\nUsing default tag: latest\nlatest: Pulling from ghcr.io/netapp/harvest\nDigest: sha256:6ff88153812ebb61e9dd176182bf8a792cde847748c5654d65f4630e61b1f3ae\nStatus: Image is up to date for ghcr.io/netapp/harvest:latest\nghcr.io/netapp/harvest:latest\n

Notice that the IMAGE ID is identical for both images since they are the same image.

      docker image ls -a\nREPOSITORY                  TAG       IMAGE ID       CREATED        SIZE\nghcr.io/netapp/harvest      latest    80061bbe1c2c   10 days ago    56.4MB  <== Harvest image from GitHub Container Registry\nrahulguptajss/harvest       latest    80061bbe1c2c   10 days ago    56.4MB\nprom/prometheus             v2.33.1   e528f02c45a6   3 weeks ago    204MB\ngrafana/grafana             8.3.4     4a34578e4374   5 weeks ago    274MB\n

      We can now remove the Docker Hub pulled image

      docker image rm rahulguptajss/harvest:latest\nUntagged: rahulguptajss/harvest:latest\nUntagged: rahulguptajss/harvest@sha256:6ff88153812ebb61e9dd176182bf8a792cde847748c5654d65f4630e61b1f3ae\n\ndocker image ls -a\nREPOSITORY              TAG       IMAGE ID       CREATED        SIZE\nghcr.io/netapp/harvest   latest    80061bbe1c2c   10 days ago    56.4MB\nprom/prometheus         v2.33.1   e528f02c45a6   3 weeks ago    204MB\ngrafana/grafana         8.3.4     4a34578e4374   5 weeks ago    274MB\n
      "},{"location":"help/faq/#ports","title":"Ports","text":""},{"location":"help/faq/#what-ports-does-harvest-use","title":"What ports does Harvest use?","text":""},{"location":"help/faq/#answer_8","title":"Answer","text":"

      The default ports are shown in the following diagram.

      • Harvest's pollers use ZAPI or REST to communicate with ONTAP on port 443
      • Each poller exposes the Prometheus port defined in your harvest.yml file
      • Prometheus scrapes each poller-exposed Prometheus port (promPort1, promPort2, promPort3)
      • Prometheus's default port is 9090
      • Grafana's default port is 3000
      "},{"location":"help/faq/#snapmirror_labels","title":"Snapmirror_labels","text":""},{"location":"help/faq/#why-do-my-snapmirror_labels-have-an-empty-source_node","title":"Why do my snapmirror_labels have an empty source_node?","text":""},{"location":"help/faq/#answer_9","title":"Answer","text":"

Snapmirror relationships have a source and a destination node. ONTAP, however, does not expose the source side of that relationship; only the destination side is returned via the ZAPI/REST APIs. Because of that, the Prometheus metric named snapmirror_labels will have an empty source_node label.

      The dashboards show the correct value for source_node since we join multiple metrics in the Grafana panels to synthesize that information.

In short: don't rely on snapmirror_labels for source_node labels. If you need source_node, you will need to do a join similar to the one the Snapmirror dashboard does.

      See https://github.com/NetApp/harvest/issues/1192 for more information and linked pull requests for REST and ZAPI.

      "},{"location":"help/faq/#nfs-clients-dashboard","title":"NFS Clients Dashboard","text":""},{"location":"help/faq/#why-do-my-nfs-clients-dashboard-have-no-data","title":"Why do my NFS Clients Dashboard have no data?","text":""},{"location":"help/faq/#answer_10","title":"Answer","text":"

The NFS Clients dashboard is only available through the REST collector; this information is not available through ZAPI. You must enable the REST collector in your harvest.yml config and uncomment the nfs_clients.yaml section in your default.yaml file.

      Note: Enabling nfs_clients.yaml may slow down data collection.
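
As a sketch, adding the REST collector to a poller in harvest.yml looks like the snippet below (the poller name is illustrative; the commented-out nfs_clients.yaml line to uncomment lives in the REST collector's default.yaml):

Pollers:\n  infinity:\n    collectors:\n      - Rest        # required for the NFS Clients dashboard\n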

      "},{"location":"help/faq/#file-analytics-dashboard","title":"File Analytics Dashboard","text":""},{"location":"help/faq/#why-do-my-file-analytics-dashboard-have-no-data","title":"Why do my File Analytics Dashboard have no data?","text":""},{"location":"help/faq/#answer_11","title":"Answer","text":"

      This dashboard requires ONTAP 9.8+ and the APIs are only available via REST. Please enable the REST collector in your harvest config. To collect and display usage data such as capacity analytics, you need to enable File System Analytics on a volume. Please see https://docs.netapp.com/us-en/ontap/task_nas_file_system_analytics_enable.html for more details.
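
For reference, enabling File System Analytics from the ONTAP CLI looks roughly like the following (the vserver and volume names are placeholders; see the linked NetApp documentation for the authoritative steps):

volume analytics on -vserver svm1 -volume vol1\n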

      "},{"location":"help/faq/#why-do-i-have-volume-sis-stat-panel-empty-in-volume-dashboard","title":"Why do I have Volume Sis Stat panel empty in Volume dashboard?","text":""},{"location":"help/faq/#answer_12","title":"Answer","text":"

      This panel requires ONTAP 9.12+ and the APIs are only available via REST. Enable the REST collector in your harvest.yml config.

      "},{"location":"help/log-collection/","title":"Harvest Logs Collection Guide","text":"

      This guide will help you collect Harvest logs on various platforms. Follow the instructions specific to your platform. If you would like to share the collected logs with the Harvest team, please email them to ng-harvest-files@netapp.com.

      If the files are too large to email, let us know at the address above or on Discord, and we'll send you a file sharing link to upload your files.

      "},{"location":"help/log-collection/#rpm-deb-and-native-installations","title":"RPM, DEB, and Native Installations","text":"

      For RPM, DEB, and native installations, use the following command to create a compressed tar file containing the logs:

      tar -czvf harvest_logs.tar.gz -C /var/log harvest\n

      This command will create a file named harvest_logs.tar.gz with the contents of the /var/log/harvest directory.

      "},{"location":"help/log-collection/#docker-container","title":"Docker Container","text":"

      For Docker containers, first, identify the container ID for your Harvest instance. Then, replace <container_id> with the actual container ID in the following command:

      docker logs <container_id> &> harvest_logs.txt && tar -czvf harvest_logs.tar.gz harvest_logs.txt\n

      This command will create a file named harvest_logs.tar.gz containing the logs from the specified container.

      "},{"location":"help/log-collection/#nabox-4","title":"NABox 4","text":"

      Collect a support bundle from the NABox web interface by clicking the About button in the left gutter and then clicking the Download Support Bundle button.

      "},{"location":"help/log-collection/#nabox-3","title":"NABox 3","text":"

      For NABox installations, ssh into your nabox instance, and use the following command to create a compressed tar file containing the logs:

      dc logs nabox-api > nabox-api.log; dc logs nabox-harvest2 > nabox-harvest2.log;\\\n  tar -czf nabox-logs-`date +%Y-%m-%d_%H:%M:%S`.tgz *\n

      This command will create a file named nabox-logs-$date.tgz containing the nabox-api and Harvest poller logs.

      For more information, see the NABox documentation on collecting logs

      "},{"location":"help/troubleshooting/","title":"Checklists for Harvest","text":"

      A set of steps to go through when something goes wrong.

      "},{"location":"help/troubleshooting/#what-version-of-ontap-do-you-have","title":"What version of ONTAP do you have?","text":"

Run the following, replacing <poller> with the poller from your harvest.yml

      ./bin/harvest zapi -p <poller> show system\n

      Copy and paste the output into your issue. Here's an example:

      ./bin/harvest -p infinity show system\nconnected to infinity (NetApp Release 9.8P2: Tue Feb 16 03:49:46 UTC 2021)\n[results]                             -                                   *\n  [build-timestamp]                   -                          1613447386\n  [is-clustered]                      -                                true\n  [version]                           - NetApp Release 9.8P2: Tue Feb 16 03:49:46 UTC 2021\n  [version-tuple]                     -                                   *\n    [system-version-tuple]            -                                   *\n      [generation]                    -                                   9\n      [major]                         -                                   8\n      [minor]                         -                                   0\n

      "},{"location":"help/troubleshooting/#install-fails","title":"Install fails","text":"

      I tried to install and ...

      "},{"location":"help/troubleshooting/#how-do-i-tell-if-harvest-is-doing-anything","title":"How do I tell if Harvest is doing anything?","text":"

      You believe Harvest is installed fine, but it's not working.

      • Post the contents of your harvest.yml

Try validating your harvest.yml with yamllint like so: yamllint -d relaxed harvest.yml. If you do not have yamllint installed, look here.

      There should be no errors - warnings like the following are fine:

      harvest.yml\n  64:1      warning  too many blank lines (3 > 0)  (empty-lines)\n

      • How did you start Harvest?

      • What do you see in /var/log/harvest/*

      • What does ps aux | grep poller show?

      • If you are using Prometheus, try hitting Harvest's Prometheus endpoint like so:

      curl http://machine-this-is-running-harvest:prometheus-port-in-harvest-yaml/metrics

• Check file ownership (user/group) and file permissions of your templates, executable, etc. in your Harvest home directory (ls -la /opt/harvest/). See also.
      "},{"location":"help/troubleshooting/#how-do-i-start-harvest-in-debug-mode","title":"How do I start Harvest in debug mode?","text":"

      Use the --debug flag when starting a poller to enable debug logging (--debug is shorthand for --loglevel 1). Another useful flag is --foreground, which causes all log messages to be written to the terminal. Note that you can only start one poller in foreground mode.

The amount of logged information can be controlled with the --loglevel flag followed by an integer value. The integer values are as follows:

• 0: Trace
• 1: Debug
• 2: Info (default)
• 3: Warning
• 4: Error
• 5: Critical

      Examples:

      bin/harvest start $POLLER_NAME --foreground --debug\nor\nbin/harvest start $POLLER_NAME --loglevel=1 --collectors Zapi --objects Qtree\n
      "},{"location":"help/troubleshooting/#how-do-i-start-harvest-in-foreground-mode","title":"How do I start Harvest in foreground mode?","text":"

      See How do I start Harvest in debug mode?

      "},{"location":"help/troubleshooting/#how-do-i-start-my-poller-with-only-one-collector","title":"How do I start my poller with only one collector?","text":"

Since a poller will start a large number of collectors (each collector-object pair is treated as a collector), it is often hard to find the issue you are looking for in the abundance of log messages. It might therefore be useful to start a single collector-object pair when troubleshooting. You can use the --collectors and --objects flags for that. For example, start only the ZapiPerf collector with the SystemNode object:

      harvest start my_poller --collectors ZapiPerf --objects SystemNode

(To find the correct object name, check the conf/COLLECTOR/default.yaml file of the collector.)

      "},{"location":"help/troubleshooting/#errors-in-the-log-file","title":"Errors in the log file","text":""},{"location":"help/troubleshooting/#some-of-my-clusters-are-not-showing-up-in-grafana","title":"Some of my clusters are not showing up in Grafana","text":"

      The logs show these errors:

      context deadline exceeded (Client.Timeout or context cancellation while reading body)\n\nand then for each volume\n\nskipped instance [9c90facd-3730-48f1-b55c-afacc35c6dbe]: not found in cache\n

      "},{"location":"help/troubleshooting/#workarounds","title":"Workarounds","text":"

      context deadline exceeded (Client.Timeout or context cancellation while reading body)

      means Harvest is timing out when talking to your cluster. This sometimes happens when you have a large number of resources (e.g. volumes).

There are a few parameters that you can change to prevent this from happening. You can do this by editing the subtemplate of the affected resource. For example, you can add the parameters in conf/zapiperf/cdot/9.8.0/volume.yaml or conf/zapi/cdot/9.8.0/volume.yaml. If the errors happen for most of the resources, you can add them to the main template of the collector (conf/zapi/default.yaml or conf/zapiperf/default.yaml) to apply them to all objects.

      "},{"location":"help/troubleshooting/#client_timeout","title":"client_timeout","text":"

      Increase the client_timeout value by adding a client_timeout line at the beginning of the template, like so:

      # increase the timeout to 1 minute\nclient_timeout: 1m\n
      "},{"location":"help/troubleshooting/#batch_size","title":"batch_size","text":"

Decrease the batch_size value by adding a batch_size line at the beginning of the template. The default value of this parameter is 500. By decreasing it, the collector will fetch fewer instances during each API request. Example:

      # decrease number of instances to 200 for each API request\nbatch_size: 200\n
      "},{"location":"help/troubleshooting/#schedule","title":"schedule","text":"

      If nothing else helps, you can increase the data poll interval of the collector (default is 1m for ZapiPerf and 3m for Zapi). You can do this either by adding a schedule attribute to the template or, if it already exists, by changing the - data line.

      Example for ZapiPerf:

      # increase data poll frequency to 2 minutes\nschedule:\n  - counter: 20m\n  - instance: 10m\n  - data: 2m\n
      Example for Zapi:

      # increase data poll frequency to 5 minutes\nschedule:\n  - instance: 10m\n  - data: 5m\n
      "},{"location":"help/troubleshooting/#prometheus-http-service-discovery-doesnt-work","title":"Prometheus HTTP Service Discovery doesn't work","text":"

      Some things to check:

      • Make sure the Harvest admin node is started via bin/harvest admin start and there are no errors printed to the console
      • Make sure your harvest.yml includes a valid Admin: section
      • Ensure bin/harvest doctor runs without error. If it does, include the output of bin/harvest doctor --print in Slack or your GitHub issue
      • Ensure your /etc/prometheus/prometheus.yml has a scrape config with http_sd_configs and it points to the admin node's ip:port
      • Ensure there are no errors in your poller logs (/var/log/harvest) related to the poller publishing its Prometheus port to the admin node. Something like this should help narrow it down: grep -R -E \"error.*poller.go\" /var/log/harvest/
        • If you see errors like dial udp 1.1.1.1:80: connect: network is unreachable, make sure your machine has a default route setup for your main interface
      • If the admin node is running, your harvest.yml includes the Admin: section, and your pollers are using the Prometheus exporter you should be able to curl the admin node endpoint for a list of running Harvest pollers like this:
        curl -s -k https://localhost:8887/api/v1/sd | jq .\n[\n  {\n    \"targets\": [\n      \":12994\"\n    ],\n    \"labels\": {\n      \"__meta_poller\": \"F2240-127-26\"\n    }\n  },\n  {\n    \"targets\": [\n      \":39000\"\n    ],\n    \"labels\": {\n      \"__meta_poller\": \"simple1\"\n    }\n  }\n]\n
      "},{"location":"help/troubleshooting/#how-do-i-run-harvest-commands-in-nabox","title":"How do I run Harvest commands in NAbox?","text":"

NAbox is a vApp running Alpine Linux and Docker. NAbox runs Harvest as a set of Docker containers. That means that to execute Harvest commands on NAbox, you need to exec into the container by following the steps below.

      1. ssh into your NAbox instance

      2. Start bash in the Harvest container

      dc exec nabox-harvest2 bash\n

      You should see no errors and your prompt will change to something like root@nabox-harvest2:/app#

      Below are examples of running Harvest commands against a cluster named umeng-aff300-05-06. Replace with your cluster name as appropriate.

      # inside container\n\n> cat /etc/issue\nDebian GNU/Linux 10 \\n \\l\n\n> cd /netapp-harvest\nbin/harvest version\nharvest version 22.08.0-1 (commit 93db10a) (build date 2022-08-19T09:10:05-0400) linux/amd64\nchecking GitHub for latest... you have the latest \u2713\n\n# harvest.yml is found at /conf/harvest.yml\n\n> bin/zapi --poller umeng-aff300-05-06 show system\nconnected to umeng-aff300-05-06 (NetApp Release 9.9.1P9X3: Tue Apr 19 19:05:24 UTC 2022)\n[results]                                          -                                   *\n  [build-timestamp]                                -                          1650395124\n  [is-clustered]                                   -                                true\n  [version]                                        - NetApp Release 9.9.1P9X3: Tue Apr 19 19:05:24 UTC 2022\n  [version-tuple]                                  -                                   *\n    [system-version-tuple]                         -                                   *\n      [generation]                                 -                                   9\n      [major]                                      -                                   9\n      [minor]                                      -                                   1\n\nbin/zapi -p umeng-aff300-05-06 show data --api environment-sensors-get-iter --max 10000 > env-sensor.xml\n

      The env-sensor.xml file will be written to the /opt/packages/harvest2 directory on the host.

      If needed, you can scp that file off NAbox and share it with the Harvest team.

      "},{"location":"help/troubleshooting/#rest-collector-auth-errors","title":"Rest Collector Auth errors?","text":"

If you are seeing errors like User is not authorized or not authorized for that command while using the REST collector, follow the steps below to make sure permissions are set correctly.

1. Verify that the user has permissions for the relevant authentication method.

      security login show -vserver ROOT_VSERVER -user-or-group-name harvest2 -application http

2. Verify that the user has read-only permissions to the API.
      security login role show -role harvest2-role\n

3. Verify that an entry is present for the following command.
      vserver services web access show -role harvest2-role -name rest\n

If it is missing, add an entry with the following command:

      vserver services web access create -vserver umeng-aff300-01-02 -name rest -role harvest2-role\n
      "},{"location":"help/troubleshooting/#why-do-i-have-gaps-in-my-dashboards","title":"Why do I have gaps in my dashboards?","text":"

      Here are possible reasons and things to check:

      • Prometheus scrape_interval found via (http://$promIP:9090/config)
      • Prometheus log files
• Harvest collector scrape interval; check your:
        • conf/zapi/default.yaml - default for config is 3m
        • conf/zapiperf/default.yaml - default of perf is 1m
• Check your poller logs for any errors or lag messages
      • When using VictoriaMetrics, make sure your Prometheus exporter config includes sort_labels: true, since VictoriaMetrics will mark series stale if the label order changes between polls.
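
For example, a Prometheus exporter entry with sort_labels enabled might look like this (a sketch; the exporter name and port are illustrative):

Exporters:\n  victoriametrics:\n    exporter: Prometheus\n    addr: 0.0.0.0\n    port: 12990\n    sort_labels: true\n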
      "},{"location":"help/troubleshooting/#nabox","title":"NABox","text":"

      For NABox installations, refer to the NABox documentation on troubleshooting:

      NABox Troubleshooting

      "},{"location":"install/containerd/","title":"Containerized Harvest on Mac using containerd","text":"

      Harvest runs natively on a Mac already. If you need that, git clone and use GOOS=darwin make build.

This page describes how to run Harvest on your Mac in a containerized environment (Compose, K8, etc.). The documentation below uses Rancher Desktop, but lima works just as well. Keep in mind that both of them are considered alpha: they work, but are still undergoing a lot of change.

      "},{"location":"install/containerd/#setup","title":"Setup","text":"

We're going to:

• Install and start Rancher Desktop
• (Optional) Create a Harvest Docker image by following Harvest's existing documentation
• Generate a Compose file following Harvest's existing documentation
• Concatenate the Prometheus/Grafana compose file with the Harvest compose file, since Rancher doesn't support multiple compose files yet
• Fix up the concatenated file
• Start containers

      Under the hood, Rancher is using lima. If you want to skip Rancher and use lima directly that works too.

      "},{"location":"install/containerd/#install-and-start-rancher-desktop","title":"Install and Start Rancher Desktop","text":"

      We'll use brew to install Rancher.

      brew install rancher\n

After Rancher Desktop installs, start it (Cmd + Space, type Rancher) and wait for it to start a VM and download images. Once everything is started, continue.

      "},{"location":"install/containerd/#create-harvest-docker-image","title":"Create Harvest Docker image","text":"

      You only need to create a new image if you've made changes to Harvest. If you just want to use the latest version of Harvest, skip this step.

These are the same steps outlined in Building Harvest Docker Image, except we replace docker build with nerdctl like so:

      source .harvest.env\nnerdctl build -f container/onePollerPerContainer/Dockerfile --build-arg GO_VERSION=${GO_VERSION} -t harvest:latest . --no-cache \n
      "},{"location":"install/containerd/#generate-a-harvest-compose-file","title":"Generate a Harvest compose file","text":"

Follow the existing documentation to set up your harvest.yml file.

      Create your harvest-compose.yml file like this:

      docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest \\\n  generate docker full \\\n  --output harvest-compose.yml # --image tag, if you built a new image above\n
      "},{"location":"install/containerd/#combine-prometheusgrafana-and-harvest-compose-file","title":"Combine Prometheus/Grafana and Harvest compose file","text":"

Currently, nerdctl compose does not support running with multiple compose files, so we'll concatenate prom-stack.yml and harvest-compose.yml into one file and then fix it up.

      cat prom-stack.yml harvest-compose.yml > both.yml\n\n# jump to line 45 and remove redundant version and services lines (lines 45, 46, 47 should be removed)\n# fix indentation of remaining lines - in vim, starting at line 46\n# Shift V\n# Shift G\n# Shift .\n# Esc\n# Shift ZZ\n
      "},{"location":"install/containerd/#start-containers","title":"Start containers","text":"
      nerdctl compose -f both.yml up -d\n\nnerdctl ps -a\n\nCONTAINER ID    IMAGE                               COMMAND                   CREATED               STATUS    PORTS                       NAMES\nbd7131291960    docker.io/grafana/grafana:latest    \"/run.sh\"                 About a minute ago    Up        0.0.0.0:3000->3000/tcp      grafana\nf911553a14e2    docker.io/prom/prometheus:latest    \"/bin/prometheus --c\u2026\"    About a minute ago    Up        0.0.0.0:9090->9090/tcp      prometheus\n037a4785bfad    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:15007->15007/tcp    poller_simple7_v21.11.0513\n03fb951cfe26    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    59 seconds ago        Up        0.0.0.0:15025->15025/tcp    poller_simple25_v21.11.0513\n049d0d65b434    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:16050->16050/tcp    poller_simple49_v21.11.0513\n0b77dd1bc0ff    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:16067->16067/tcp    poller_u2_v21.11.0513\n1cabd1633c6f    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:15015->15015/tcp    poller_simple15_v21.11.0513\n1d78c1bf605f    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:15062->15062/tcp    poller_sandhya_v21.11.0513\n286271eabc1d    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:15010->15010/tcp    poller_simple10_v21.11.0513\n29710da013d4    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:12990->12990/tcp    poller_simple1_v21.11.0513\n321ae28637b6    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:15020->15020/tcp    poller_simple20_v21.11.0513\n39c91ae54d68    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:15053->15053/tcp    poller_simple-53_v21.11.0513\n\nnerdctl logs poller_simple1_v21.11.0513\nnerdctl compose -f both.yml down\n\n# http://localhost:9090/targets   Prometheus\n# http://localhost:3000           Grafana\n# http://localhost:15062/metrics  Poller metrics\n
      "},{"location":"install/containers/","title":"Docker","text":""},{"location":"install/containers/#overview","title":"Overview","text":"

      Harvest is container-ready and supports several deployment options:

• Stand-up Prometheus, Grafana, and Harvest via Docker Compose. Choose this if you want to hit the ground running. Installation, volume, and network mounts are handled automatically.

      • Stand-up Harvest via Docker Compose that offers more flexibility in configuration. Choose this if you only want to run Harvest containers. Since you pick-and-choose what gets built and how it's deployed, stronger familiarity with containers is recommended.

      • If you prefer Ansible, David Blackwell created an Ansible script that stands up Harvest, Grafana, and Prometheus.

      • Want to run Harvest on a Mac via containerd and Rancher Desktop? We got you covered.

      • K8 Deployment via Kompose

      "},{"location":"install/containers/#docker-compose","title":"Docker Compose","text":"

      This is a quick way to install and get started with Harvest. Follow the four steps below to:

      • Setup Harvest, Grafana, and Prometheus via Docker Compose
• Harvest dashboards are automatically imported and set up in Grafana with a Prometheus data source
      • A separate poller container is created for each monitored cluster
      • All pollers are automatically added as Prometheus scrape targets
      "},{"location":"install/containers/#setup-harvestyml","title":"Setup harvest.yml","text":"
• Create a harvest.yml file with your cluster details; below is an example with annotated comments. Modify as needed for your scenario.

      This config is using the Prometheus exporter port_range feature, so you don't have to manage the Prometheus exporter port mappings for each poller.

      Exporters:\n  prometheus1:\n    exporter: Prometheus\n    addr: 0.0.0.0\n    port_range: 2000-2030  # <====== adjust to be greater than equal to the number of monitored clusters\n\nDefaults:\n  collectors:\n    - Zapi\n    - ZapiPerf\n    - EMS\n  use_insecure_tls: true   # <====== adjust as needed to enable/disable TLS checks \n  exporters:\n    - prometheus1\n\nPollers:\n  infinity:                # <====== add your cluster(s) here, they use the exporter defined three lines above\n    datacenter: DC-01\n    addr: 10.0.1.2\n    auth_style: basic_auth\n    username: user\n    password: 123#abc\n  # next cluster ....  \n
      "},{"location":"install/containers/#generate-a-docker-compose-for-your-pollers","title":"Generate a Docker compose for your Pollers","text":"
      • Generate a Docker compose file from your harvest.yml
      docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest \\\n  generate docker full \\\n  --output harvest-compose.yml\n

By default, the above command uses the Harvest configuration file (harvest.yml) located in the current directory. If you want to use a Harvest config from a different location, see below.

      What if my harvest configuration file is somewhere else or not named harvest.yml

      Use the following docker run command, updating the HYML variable with the absolute path to your harvest.yml.

      HYML=\"/opt/custom_harvest.yml\"; \\\ndocker run --rm \\\n--env UID=$(id -u) --env GID=$(id -g) \\\n--entrypoint \"bin/harvest\" \\\n--volume \"$(pwd):/opt/temp\" \\\n--volume \"${HYML}:${HYML}\" \\\nghcr.io/netapp/harvest:latest \\\ngenerate docker full \\\n--output harvest-compose.yml \\\n--config \"${HYML}\"\n

      generate docker full does two things:

      1. Creates a Docker compose file with a container for each Harvest poller defined in your harvest.yml
      2. Creates a matching Prometheus service discovery file for each Harvest poller (located in container/prometheus/harvest_targets.yml). Prometheus uses this file to scrape the Harvest pollers.
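
For illustration, that service discovery file is a standard Prometheus file_sd target list; a hypothetical sketch (your generated file will differ):

# container/prometheus/harvest_targets.yml (illustrative)\n- targets: ['poller-infinity:2000']\n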
      "},{"location":"install/containers/#start-everything","title":"Start everything","text":"

      Bring everything up

      docker compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans\n
      "},{"location":"install/containers/#note-on-docker-logging-configuration","title":"Note on Docker Logging Configuration","text":"

      By default, Docker uses the json-file logging driver which does not limit the size of the logs. This can cause your system to run out of disk space. Docker provides several options for logging configuration, including different logging drivers and options for log rotation.

      Docker recommends using the local driver to prevent disk-exhaustion. More details can be found in Docker logging documentation
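
As a sketch, switching one of the generated services to the local driver with log rotation might look like the snippet below (the service name and limits are illustrative, not part of the generated file):

services:\n  poller-infinity:\n    logging:\n      driver: local\n      options:\n        max-size: 10m\n        max-file: \"3\"\n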

      "},{"location":"install/containers/#prometheus-and-grafana","title":"Prometheus and Grafana","text":"

The prom-stack.yml compose file creates a frontend and backend network. Prometheus and Grafana publish their admin ports on the front-end network and are routable to the local machine. By default, the Harvest pollers are part of the backend network and also expose their Prometheus web endpoints. If you do not want their endpoints exposed, add the --port=false option to the generate sub-command in the previous step.

      "},{"location":"install/containers/#prometheus","title":"Prometheus","text":"

      After bringing up the prom-stack.yml compose file, you can check Prometheus's list of targets at http://IP_OF_PROMETHEUS:9090/targets.

      "},{"location":"install/containers/#customize-prometheuss-retention-time","title":"Customize Prometheus's Retention Time","text":"

      By default, prom-stack.yml is configured for a one year data retention period. To increase this, for example, to two years, you can create a specific configuration file and make your changes there. This prevents your custom settings from being overwritten if you regenerate the default prom-stack.yml file. Here's the process:

      • Copy the original prom-stack.yml to a new file named prom-stack-prod.yml:
      cp prom-stack.yml prom-stack-prod.yml\n
      • Edit prom-stack-prod.yml to include the extended data retention setting by updating the --storage.tsdb.retention.time=2y line under the Prometheus service's command section:
      command:\n  - '--config.file=/etc/prometheus/prometheus.yml'\n  - '--storage.tsdb.path=/prometheus'\n  - '--storage.tsdb.retention.time=2y'       # Sets data retention to 2 years\n  - '--web.console.libraries=/usr/share/prometheus/console_libraries'\n  - '--web.console.templates=/usr/share/prometheus/consoles'\n
      • Save the changes to prom-stack-prod.yml.

Now, you can start your Docker containers with the updated configuration, which includes the two-year data retention period, by executing the command below:

      docker compose -f prom-stack-prod.yml -f harvest-compose.yml up -d --remove-orphans\n
      "},{"location":"install/containers/#grafana","title":"Grafana","text":"

      After bringing up the prom-stack.yml compose file, you can access Grafana at http://IP_OF_GRAFANA:3000.

      You will be prompted to create a new password the first time you log in. Grafana's default credentials are

      username: admin\npassword: admin\n
      "},{"location":"install/containers/#manage-pollers","title":"Manage pollers","text":""},{"location":"install/containers/#how-do-i-add-a-new-poller","title":"How do I add a new poller?","text":"
      1. Add poller to harvest.yml
      2. Regenerate compose file by running harvest generate
      3. Run docker compose up, for example,
      docker compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans\n
      "},{"location":"install/containers/#stop-all-containers","title":"Stop all containers","text":"
      docker compose -f prom-stack.yml -f harvest-compose.yml down\n

      If you encounter the following error message while attempting to stop your Docker containers using docker-compose down

      Error response from daemon: Conflict. The container name \"/poller-u2\" is already in use by container\n

      This error is likely due to running docker-compose down from a different directory than where you initially ran docker-compose up.

      To resolve this issue, make sure to run the docker-compose down command from the same directory where you ran docker-compose up. This will ensure that Docker can correctly match the container names and IDs with the directory you are working in. Alternatively, you can stop the Harvest, Prometheus, and Grafana containers by using the following command:

      docker ps -aq --filter \"name=prometheus\" --filter \"name=grafana\" --filter \"name=poller-\" | xargs docker stop | xargs docker rm\n

      Note: Deleting or stopping Docker containers does not remove the data stored in Docker volumes.

      "},{"location":"install/containers/#upgrade-harvest","title":"Upgrade Harvest","text":"

      Note: If you want to keep your historical Prometheus data, and you set up your Docker Compose workflow before Harvest 22.11, please read how to migrate your Prometheus volume before continuing with the upgrade steps below.

      If you need to customize your Prometheus configuration, such as changing the data retention period, please refer to the instructions on customizing the Prometheus configuration.

      To upgrade Harvest:

1. Retrieve the most recent version of the Harvest Docker image by executing the following command. This is needed since the new version may contain new templates, dashboards, or other files not included in your existing Docker image.

        docker pull ghcr.io/netapp/harvest\n

      2. Stop all containers

3. Regenerate your harvest-compose.yml file by running harvest generate. Make sure you don't skip this step. It is essential as it updates local copies of templates and dashboards, which are then mounted to the containers. If this step is skipped, Harvest will run with older templates and dashboards, which will likely cause problems. By default, generate will use the latest tag. If you want to upgrade to a nightly build, see the twisty.

        I want to upgrade to a nightly build

        Tell the generate cmd to use a different tag like so:

        docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest:nightly \\\n  generate docker full \\\n  --image ghcr.io/netapp/harvest:nightly \\\n  --output harvest-compose.yml\n
      4. Restart your containers using the following:

        docker compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans\n
        Troubleshooting

        If you encounter the following error:

        network harvest_backend was found but has incorrect label com.docker.compose.network set to \"harvest_backend\"\n

        Remove the conflicting networks:

        docker network rm harvest_backend harvest_frontend\n

        Then, restart your containers again using the command above.

      "},{"location":"install/containers/#building-harvest-docker-image","title":"Building Harvest Docker Image","text":"

      Building a custom Harvest Docker image is only necessary if you require a tailored solution. If your intention is to run Harvest using Docker without any customizations, please refer to the Overview section above.

      source .harvest.env\ndocker build -f container/onePollerPerContainer/Dockerfile --build-arg GO_VERSION=${GO_VERSION} -t harvest:latest . --no-cache\n
      "},{"location":"install/harvest-containers/","title":"Harvest containers","text":"

Follow this method if your goal is to establish a separate Harvest container for each poller defined in your harvest.yml file. Please note that these containers must be incorporated into your current infrastructure, which might include systems like Prometheus or Grafana.

      "},{"location":"install/harvest-containers/#setup-harvestyml","title":"Setup harvest.yml","text":"
• Create a harvest.yml file with your cluster details; below is an example with annotated comments. Modify as needed for your scenario.

      This config is using the Prometheus exporter port_range feature, so you don't have to manage the Prometheus exporter port mappings for each poller.

      Exporters:\n  prometheus1:\n    exporter: Prometheus\n    addr: 0.0.0.0\n    port_range: 2000-2030  # <====== adjust to be greater than equal to the number of monitored clusters\n\nDefaults:\n  collectors:\n    - Zapi\n    - ZapiPerf\n    - EMS\n  use_insecure_tls: true   # <====== adjust as needed to enable/disable TLS checks \n  exporters:\n    - prometheus1\n\nPollers:\n  infinity:                # <====== add your cluster(s) here, they use the exporter defined three lines above\n    datacenter: DC-01\n    addr: 10.0.1.2\n    auth_style: basic_auth\n    username: user\n    password: 123#abc\n  # next cluster ....  \n
      "},{"location":"install/harvest-containers/#generate-a-docker-compose-for-your-pollers","title":"Generate a Docker compose for your Pollers","text":"
      • Generate a Docker compose file from your harvest.yml
      docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest \\\n  generate docker \\\n  --output harvest-compose.yml\n
      "},{"location":"install/harvest-containers/#start-everything","title":"Start everything","text":"

      Bring everything up

      docker compose -f harvest-compose.yml up -d --remove-orphans\n
      "},{"location":"install/harvest-containers/#manage-pollers","title":"Manage pollers","text":""},{"location":"install/harvest-containers/#how-do-i-add-a-new-poller","title":"How do I add a new poller?","text":"
      1. Add poller to harvest.yml
      2. Regenerate compose file by running harvest generate
      3. Run docker compose up, for example,
      docker compose -f harvest-compose.yml up -d --remove-orphans\n
      "},{"location":"install/harvest-containers/#stop-all-containers","title":"Stop all containers","text":"
docker compose -f harvest-compose.yml down\n
      "},{"location":"install/harvest-containers/#upgrade-harvest","title":"Upgrade Harvest","text":"

      To upgrade Harvest:

1. Retrieve the most recent version of the Harvest Docker image by executing the following command. This is needed since the new version may contain new templates, dashboards, or other files not included in your existing Docker image.

        docker pull ghcr.io/netapp/harvest\n

      2. Stop all containers

3. Regenerate your harvest-compose.yml file by running harvest generate. By default, generate will use the latest tag. If you want to upgrade to a nightly build, see the twisty.

        I want to upgrade to a nightly build

        Tell the generate cmd to use a different tag like so:

        docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest:nightly \\\n  generate docker \\\n  --image ghcr.io/netapp/harvest:nightly \\\n  --output harvest-compose.yml\n
      4. Restart your containers using the following:

      docker compose -f harvest-compose.yml up -d --remove-orphans\n
      "},{"location":"install/k8/","title":"K8 Deployment","text":"

The following steps are provided for reference purposes only. Depending on the specifics of your k8 configuration, you may need to modify the steps or files.

      "},{"location":"install/k8/#requirements","title":"Requirements","text":"
      • Kompose: v1.25 or higher
      "},{"location":"install/k8/#deployment","title":"Deployment","text":"
      • Local k8 Deployment
      • Cloud Deployment
      "},{"location":"install/k8/#local-k8-deployment","title":"Local k8 Deployment","text":"

      To run Harvest resources in Kubernetes, please execute the following commands:

      1. After adding your clusters to harvest.yml, generate harvest-compose.yml and prom-stack.yml.
      docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest \\\n  generate docker full \\\n  --output harvest-compose.yml\n
      example harvest.yml

      Tools:\nExporters:\n    prometheus1:\n        exporter: Prometheus\n        port_range: 12990-14000\nDefaults:\n    use_insecure_tls: true\n    collectors:\n      - Zapi\n      - ZapiPerf\n    exporters:\n      - prometheus1\nPollers:\n    u2:\n        datacenter: u2\n        addr: ADDRESS\n        username: USER\n        password: PASS\n

      harvest-compose.yml

      version: \"3.7\"\n\nservices:\n\n  u2:\n    image: ghcr.io/netapp/harvest:latest\n    container_name: poller-u2\n    restart: unless-stopped\n    ports:\n      - 12990:12990\n    command: '--poller u2 --promPort 12990 --config /opt/harvest.yml'\n    volumes:\n      - /Users/harvest/conf:/opt/harvest/conf\n      - /Users/harvest/cert:/opt/harvest/cert\n      - /Users/harvest/harvest.yml:/opt/harvest.yml\n    networks:\n      - backend\n

2. Using kompose, convert harvest-compose.yml and prom-stack.yml into Kubernetes resources and save them as kub.yaml.
      kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n
      kub.yaml

      ---\napiVersion: v1\nkind: Service\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n    kompose.service.type: nodeport\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: grafana\n  name: grafana\nspec:\n  ports:\n    - name: \"3000\"\n      port: 3000\n      targetPort: 3000\n  selector:\n    io.kompose.service: grafana\n  type: NodePort\nstatus:\n  loadBalancer: {}\n\n---\napiVersion: v1\nkind: Service\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n    kompose.service.type: nodeport\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: prometheus\n  name: prometheus\nspec:\n  ports:\n    - name: \"9090\"\n      port: 9090\n      targetPort: 9090\n  selector:\n    io.kompose.service: prometheus\n  type: NodePort\nstatus:\n  loadBalancer: {}\n\n---\napiVersion: v1\nkind: Service\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: u2\n  name: u2\nspec:\n  ports:\n    - name: \"12990\"\n      port: 12990\n      targetPort: 12990\n  selector:\n    io.kompose.service: u2\nstatus:\n  loadBalancer: {}\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n    kompose.service.type: nodeport\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: grafana\n  name: grafana\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      io.kompose.service: grafana\n  strategy:\n    type: Recreate\n  template:\n    metadata:\n      annotations:\n        kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n        kompose.service.type: nodeport\n        kompose.version: 1.28.0 (HEAD)\n      creationTimestamp: null\n      labels:\n        io.kompose.network/harvest-backend: \"true\"\n        io.kompose.network/harvest-frontend: \"true\"\n        io.kompose.service: grafana\n    spec:\n      containers:\n        - image: grafana/grafana:8.3.4\n          name: grafana\n          ports:\n            - containerPort: 3000\n          resources: {}\n          volumeMounts:\n            - mountPath: /var/lib/grafana\n              name: grafana-data\n            - mountPath: /etc/grafana/provisioning\n              name: grafana-hostpath1\n      restartPolicy: Always\n      volumes:\n        - hostPath:\n            path: /Users/harvest\n          name: grafana-data\n        - hostPath:\n            path: /Users/harvest/grafana\n          name: grafana-hostpath1\nstatus: {}\n\n---\napiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  creationTimestamp: null\n  name: harvest-backend\nspec:\n  ingress:\n    - from:\n        - podSelector:\n            matchLabels:\n              io.kompose.network/harvest-backend: \"true\"\n  podSelector:\n    matchLabels:\n      io.kompose.network/harvest-backend: \"true\"\n\n---\napiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  creationTimestamp: null\n  name: harvest-frontend\nspec:\n  ingress:\n    - from:\n        - podSelector:\n       
     matchLabels:\n              io.kompose.network/harvest-frontend: \"true\"\n  podSelector:\n    matchLabels:\n      io.kompose.network/harvest-frontend: \"true\"\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n    kompose.service.type: nodeport\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: prometheus\n  name: prometheus\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      io.kompose.service: prometheus\n  strategy:\n    type: Recreate\n  template:\n    metadata:\n      annotations:\n        kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n        kompose.service.type: nodeport\n        kompose.version: 1.28.0 (HEAD)\n      creationTimestamp: null\n      labels:\n        io.kompose.network/harvest-backend: \"true\"\n        io.kompose.service: prometheus\n    spec:\n      containers:\n        - args:\n            - --config.file=/etc/prometheus/prometheus.yml\n            - --storage.tsdb.path=/prometheus\n            - --web.console.libraries=/usr/share/prometheus/console_libraries\n            - --web.console.templates=/usr/share/prometheus/consoles\n          image: prom/prometheus:v2.33.1\n          name: prometheus\n          ports:\n            - containerPort: 9090\n          resources: {}\n          volumeMounts:\n            - mountPath: /etc/prometheus\n              name: prometheus-hostpath0\n            - mountPath: /prometheus\n              name: prometheus-data\n      restartPolicy: Always\n      volumes:\n        - hostPath:\n            path: /Users/harvest/container/prometheus\n          name: prometheus-hostpath0\n        - hostPath:\n            path: /Users/harvest\n          name: prometheus-data\nstatus: {}\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: u2\n  name: u2\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      io.kompose.service: u2\n  strategy:\n    type: Recreate\n  template:\n    metadata:\n      annotations:\n        kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n        kompose.version: 1.28.0 (HEAD)\n      creationTimestamp: null\n      labels:\n        io.kompose.network/harvest-backend: \"true\"\n        io.kompose.service: u2\n    spec:\n      containers:\n        - args:\n            - --poller\n            - u2\n            - --promPort\n            - \"12990\"\n            - --config\n            - /opt/harvest.yml\n          image: ghcr.io/netapp/harvest:latest\n          name: poller-u2\n          ports:\n            - containerPort: 12990\n          resources: {}\n          volumeMounts:\n            - mountPath: /opt/harvest/conf\n              name: u2-hostpath0\n            - mountPath: /opt/harvest/cert\n              name: u2-hostpath1\n            - mountPath: /opt/harvest.yml\n              name: u2-hostpath2\n      restartPolicy: Always\n      volumes:\n        - hostPath:\n            path: /Users/harvest/conf\n          name: u2-hostpath0\n        - hostPath:\n            path: /Users/harvest/cert\n          name: u2-hostpath1\n        - hostPath:\n          
  path: /Users/harvest/harvest.yml\n          name: u2-hostpath2\nstatus: {}\n

3. Apply kub.yaml to k8.
      kubectl apply --filename kub.yaml\n
4. List running pods.
      kubectl get pods\n
      pods

      NAME                          READY   STATUS    RESTARTS   AGE\nprometheus-666fc7b64d-xfkvk   1/1     Running   0          43m\ngrafana-7cd8bdc9c9-wmsxh      1/1     Running   0          43m\nu2-7dfb76b5f6-zbfm6           1/1     Running   0          43m\n

      "},{"location":"install/k8/#remove-all-harvest-resources-from-k8","title":"Remove all Harvest resources from k8","text":"

      kubectl delete --filename kub.yaml

      "},{"location":"install/k8/#helm-chart","title":"Helm Chart","text":"

      Generate helm charts

      kompose convert --file harvest-compose.yml --file prom-stack.yml --chart --volumes hostPath --out harvestchart\n
      "},{"location":"install/k8/#cloud-deployment","title":"Cloud Deployment","text":"

      We will use configMap to generate Kubernetes resources for deploying Harvest pollers in a cloud environment. Please note the following assumptions for the steps below:

      • The steps provided are solely for the deployment of Harvest poller pods. Separate configurations are required to set up Prometheus and Grafana.
      • Networking between Harvest and Prometheus must be configured, and this can be accomplished by adding the network configuration in harvest-compose.yaml.

1. After configuring the clusters in harvest.yml, generate harvest-compose.yml. We also want to remove the conf directory from the harvest-compose.yml file, otherwise kompose will create an empty configMap for it. We'll remove the conf directory by commenting out that line using sed.

      docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest \\\n  generate docker full \\\n  --output harvest-compose.yml\n\nsed -i '/\\/conf/s/^/#/g' harvest-compose.yml\n
      harvest-compose.yml

      version: \"3.7\"\n\nservices:\n\n  u2:\n    image: ghcr.io/netapp/harvest:latest\n    container_name: poller-u2\n    restart: unless-stopped\n    ports:\n      - 12990:12990\n    command: '--poller u2 --promPort 12990 --config /opt/harvest.yml'\n    volumes:\n      #      - /Users/harvest/conf:/opt/harvest/conf\n      - /Users/harvest/cert:/opt/harvest/cert\n      - /Users/harvest/harvest.yml:/opt/harvest.yml\n

2. Using kompose, convert harvest-compose.yml into Kubernetes resources and save them as kub.yaml.
      kompose convert --file harvest-compose.yml --volumes configMap -o kub.yaml\n
      kub.yaml

      ---\napiVersion: v1\nkind: Service\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --volumes configMap -o kub.yaml\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: u2\n  name: u2\nspec:\n  ports:\n    - name: \"12990\"\n      port: 12990\n      targetPort: 12990\n  selector:\n    io.kompose.service: u2\nstatus:\n  loadBalancer: {}\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --volumes configMap -o kub.yaml\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: u2\n  name: u2\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      io.kompose.service: u2\n  strategy:\n    type: Recreate\n  template:\n    metadata:\n      annotations:\n        kompose.cmd: kompose convert --file harvest-compose.yml --volumes configMap -o kub.yaml\n        kompose.version: 1.28.0 (HEAD)\n      creationTimestamp: null\n      labels:\n        io.kompose.network/harvest-default: \"true\"\n        io.kompose.service: u2\n    spec:\n      containers:\n        - args:\n            - --poller\n            - u2\n            - --promPort\n            - \"12990\"\n            - --config\n            - /opt/harvest.yml\n          image: ghcr.io/netapp/harvest:latest\n          name: poller-u2\n          ports:\n            - containerPort: 12990\n          resources: {}\n          volumeMounts:\n            - mountPath: /opt/harvest/cert\n              name: u2-cm0\n            - mountPath: /opt/harvest.yml\n              name: u2-cm1\n              subPath: harvest.yml\n      restartPolicy: Always\n      volumes:\n        - configMap:\n            name: u2-cm0\n          name: u2-cm0\n        - configMap:\n            items:\n              - key: harvest.yml\n                path: harvest.yml\n            name: u2-cm1\n          name: u2-cm1\nstatus: {}\n\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  creationTimestamp: null\n  labels:\n    io.kompose.service: u2\n  name: u2-cm0\n\n---\napiVersion: v1\ndata:\n  harvest.yml: |+\n    Tools:\n    Exporters:\n        prometheus1:\n            exporter: Prometheus\n            port_range: 12990-14000\n            add_meta_tags: false\n    Defaults:\n        use_insecure_tls: true\n        prefer_zapi: true\n    Pollers:\n\n        u2:\n            datacenter: u2\n            addr: ADDRESS\n            username: USER\n            password: PASS\n            collectors:\n                - Rest\n            exporters:\n                - prometheus1\n\nkind: ConfigMap\nmetadata:\n  annotations:\n    use-subpath: \"true\"\n  creationTimestamp: null\n  labels:\n    io.kompose.service: u2\n  name: u2-cm1\n\n---\napiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  creationTimestamp: null\n  name: harvest-default\nspec:\n  ingress:\n    - from:\n        - podSelector:\n            matchLabels:\n              io.kompose.network/harvest-default: \"true\"\n  podSelector:\n    matchLabels:\n      io.kompose.network/harvest-default: \"true\"\n

      1. Apply kub.yaml to k8.
      kubectl apply --filename kub.yaml\n
      1. List running pods.
      kubectl get pods\n
      pods

      NAME                  READY   STATUS    RESTARTS   AGE\nu2-6864cc7dbc-v6444   1/1     Running   0          6m27s\n
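      To check that the poller is serving metrics, you can port-forward the u2 service and scrape it. This is a minimal sketch, assuming the exporter serves metrics at the default /metrics path:

      kubectl port-forward service/u2 12990:12990 &\n# assumes the poller exposes metrics at the default /metrics path on port 12990\ncurl -s http://localhost:12990/metrics | head\n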

      "},{"location":"install/k8/#remove-all-harvest-resources-from-k8_1","title":"Remove all Harvest resources from k8","text":"

      kubectl delete --filename kub.yaml

      "},{"location":"install/k8/#helm-chart_1","title":"Helm Chart","text":"

      Generate Helm charts:

      kompose convert --file harvest-compose.yml --chart --volumes configMap --out harvestchart\n
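      The generated chart can then be installed with Helm. A minimal sketch, assuming the harvestchart directory from the command above and an example release name of harvest:

      # harvest is an example release name; harvestchart is the chart directory generated above\nhelm install harvest ./harvestchart\nhelm list\n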
      "},{"location":"install/native/","title":"Native","text":""},{"location":"install/native/#installation","title":"Installation","text":"

      Visit the Releases page and copy the tar.gz link for the latest release. For example, to download the 23.08.0 release:

      VERSION=23.08.0\nwget https://github.com/NetApp/harvest/releases/download/v${VERSION}/harvest-${VERSION}-1_linux_amd64.tar.gz\ntar -xvf harvest-${VERSION}-1_linux_amd64.tar.gz\ncd harvest-${VERSION}-1_linux_amd64\n\n# Run Harvest with the default unix localhost collector\nbin/harvest start\n
      With curl

      If you don't have wget installed, you can use curl like so:

      curl -L -O https://github.com/NetApp/harvest/releases/download/v22.08.0/harvest-22.08.0-1_linux_amd64.tar.gz\n
      "},{"location":"install/native/#upgrade","title":"Upgrade","text":"

      Stop Harvest:

      cd <existing harvest directory>\nbin/harvest stop\n

      Verify that all pollers have stopped:

      bin/harvest status\nor\npgrep --full '\\-\\-poller'  # should return nothing if all pollers are stopped\n

      Download the latest release and extract it to a new directory. For example, to upgrade to the 23.11.0 release:

      VERSION=23.11.0\nwget https://github.com/NetApp/harvest/releases/download/v${VERSION}/harvest-${VERSION}-1_linux_amd64.tar.gz\ntar -xvf harvest-${VERSION}-1_linux_amd64.tar.gz\ncd harvest-${VERSION}-1_linux_amd64\n

      Copy your old harvest.yml into the new install directory:

      cp /path/to/old/harvest/harvest.yml /path/to/new/harvest/harvest.yml\n
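      Then start Harvest from the new directory and confirm the pollers are running:

      # paths mirror the placeholders used above\ncd /path/to/new/harvest\nbin/harvest start\nbin/harvest status\n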

      After upgrading, re-import all dashboards (either with the bin/harvest grafana import CLI or via the Grafana UI) to pick up any new dashboard enhancements. For more details, see the dashboards documentation.

      It's best to run Harvest as a non-root user. Make sure the user running Harvest can write to /var/log/harvest/ or tell Harvest to write the logs somewhere else with the HARVEST_LOGS environment variable.
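      For example, a minimal sketch that points HARVEST_LOGS at a user-writable directory (the path is only an illustration):

      # the directory below is only an illustration; any directory writable by the user running Harvest works\nmkdir -p /home/harvest/logs\nHARVEST_LOGS=/home/harvest/logs bin/harvest start\n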

      If something goes wrong, examine the log files in /var/log/harvest, check out the troubleshooting section on the wiki, or jump onto Discord and ask for help.

      "},{"location":"install/overview/","title":"Overview","text":"

      Get up and running with Harvest on your preferred platform. We provide pre-compiled binaries for Linux, RPMs, and Debs, as well as prebuilt container images for both nightly and stable releases.

      • Binaries for Linux
      • RPM and Debs
      • Containers
      "},{"location":"install/overview/#nabox","title":"Nabox","text":"

      Instructions on how to install Harvest via NAbox.

      "},{"location":"install/overview/#source","title":"Source","text":"

      To build Harvest from source code, follow these steps.

      1. git clone https://github.com/NetApp/harvest.git
      2. cd harvest
      3. check the version of Go required in the go.mod file
      4. ensure you have a working Go environment at that version or newer. Go installs found here.
      5. make build (if you want to run Harvest from a Mac use GOOS=darwin make build)
      6. bin/harvest version

      Check out the Makefile for other targets of interest.
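      The build steps above, collected into a single shell session:

      git clone https://github.com/NetApp/harvest.git\ncd harvest\nmake build   # use GOOS=darwin make build on a Mac\nbin/harvest version\n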

      "},{"location":"install/package-managers/","title":"Package Managers","text":""},{"location":"install/package-managers/#redhat","title":"Redhat","text":"

      Installation and upgrade of the Harvest package may require root or administrator privileges

      "},{"location":"install/package-managers/#installation","title":"Installation","text":"

      Download the latest rpm of Harvest from the releases tab and install with yum.

      sudo yum install harvest.XXX.rpm\n
      "},{"location":"install/package-managers/#upgrade","title":"Upgrade","text":"

      Download the latest rpm of Harvest from the releases tab and upgrade with yum.

      sudo yum upgrade harvest.XXX.rpm\n

      Once the installation or upgrade has finished, edit the harvest.yml configuration file located in /opt/harvest/harvest.yml

      After editing /opt/harvest/harvest.yml, manage Harvest with systemctl start|stop|restart harvest.
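      For example, to start Harvest and confirm the service is healthy:

      sudo systemctl start harvest\nsudo systemctl status harvest   # status is standard systemctl usage, not a Harvest-specific command\n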

      After upgrading, re-import all dashboards (either with the bin/harvest grafana import CLI or via the Grafana UI) to pick up any new dashboard enhancements. For more details, see the dashboards documentation.

      To ensure that you don't run into permission issues, make sure you manage Harvest using systemctl instead of running the harvest binary directly.

      Changes install makes
      • Directories /var/log/harvest/ and /var/log/run/ are created
      • A harvest user and group are created and the installed files are chowned to harvest
      • Systemd /etc/systemd/system/harvest.service file is created and enabled
      "},{"location":"install/package-managers/#debian","title":"Debian","text":"

      Installation and upgrade of the Harvest package may require root or administrator privileges

      "},{"location":"install/package-managers/#installation_1","title":"Installation","text":"

      Download the latest deb of Harvest from the releases tab and install with apt.

      sudo apt install ./harvest-<RELEASE>.amd64.deb\n
      "},{"location":"install/package-managers/#upgrade_1","title":"Upgrade","text":"

      Download the latest deb of Harvest from the releases tab and upgrade with apt.

      sudo apt install --only-upgrade ./harvest-<RELEASE>.amd64.deb\n

      Once the installation or upgrade has finished, edit the harvest.yml configuration file located in /opt/harvest/harvest.yml

      After editing /opt/harvest/harvest.yml, manage Harvest with systemctl start|stop|restart harvest.

      After upgrading, re-import all dashboards (either with the bin/harvest grafana import CLI or via the Grafana UI) to pick up any new dashboard enhancements. For more details, see the dashboards documentation.

      To ensure that you don't run into permission issues, make sure you manage Harvest using systemctl instead of running the harvest binary directly.

      Changes install makes
      • Directories /var/log/harvest/ and /var/log/run/ are created
      • A harvest user and group are created and the installed files are chowned to harvest
      • Systemd /etc/systemd/system/harvest.service file is created and enabled
      "},{"location":"install/podman/","title":"Containerized Harvest on Linux using Rootless Podman","text":"

      RHEL 8 ships with Podman instead of Docker. There are two ways to run containers with Podman: rootless or with root. Both setups are outlined below. The Podman ecosystem is changing rapidly, so the shelf life of these instructions may be short. Make sure you have at least the same versions of the tools listed below.

      If you don't want to bother with Podman, you can also install Docker on RHEL 8 and use it to run Harvest per normal.

      "},{"location":"install/podman/#setup","title":"Setup","text":"

      Make sure your OS is up-to-date with yum update. Podman's dependencies are updated frequently.

      sudo yum remove docker-ce\nsudo yum module enable -y container-tools:rhel8\nsudo yum module install -y container-tools:rhel8\nsudo yum install podman podman-docker podman-plugins\n

      We also need to install Docker Compose since Podman uses it for compose workflows. Install docker-compose like this:

      VERSION=1.29.2\nsudo curl -L \"https://github.com/docker/compose/releases/download/$VERSION/docker-compose-$(uname -s)-$(uname -m)\" -o /usr/local/bin/docker-compose\nsudo chmod +x /usr/local/bin/docker-compose\nsudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose\n

      After all the packages are installed, start the Podman systemd socket-activated service:

      sudo systemctl start podman.socket\n
      "},{"location":"install/podman/#containerized-harvest-on-linux-using-rootful-podman","title":"Containerized Harvest on Linux using Rootful Podman","text":"

      Make sure you're able to curl the endpoint.

      sudo curl -H \"Content-Type: application/json\" --unix-socket /var/run/docker.sock http://localhost/_ping\n

      If the sudo curl does not print OK\u23ce, troubleshoot before continuing.

      Proceed to Running Harvest

      "},{"location":"install/podman/#containerized-harvest-on-linux-using-rootless-podman_1","title":"Containerized Harvest on Linux using Rootless Podman","text":"

      To run Podman rootless, we'll create a non-root user named harvest to run Harvest.

      # as root or sudo\nusermod --append --groups wheel harvest\n

      Log in as the harvest user, set up the podman.socket, and make sure the curl below works. su or sudo aren't sufficient; you need to ssh into the machine as the harvest user or use machinectl login. See sudo-rootless-podman for details.

      # these must be run as the harvest user\nsystemctl --user enable podman.socket\nsystemctl --user start podman.socket\nsystemctl --user status podman.socket\nexport DOCKER_HOST=unix:///run/user/$UID/podman/podman.sock\n\nsudo curl -H \"Content-Type: application/json\" --unix-socket /var/run/docker.sock http://localhost/_ping\n

      If the sudo curl does not print OK\u23ce, troubleshoot before continuing.

      Run podman info and make sure runRoot points to /run/user/$UID/containers (see below). If it doesn't, you'll probably run into problems when restarting the machine. See errors after rebooting.

      podman info | grep runRoot\n  runRoot: /run/user/1001/containers\n
      "},{"location":"install/podman/#running-harvest","title":"Running Harvest","text":"

      By default, Cockpit runs on port 9090, the same port as Prometheus. We'll change Prometheus's host port to 9091 so we can run both Cockpit and Prometheus; the --promPort 9091 flag in the generate command below does that.

      With these changes, the standard Harvest compose instructions can be followed as normal. In summary:

      1. Add the clusters, exporters, etc. to your harvest.yml file
      2. Generate a compose file from your harvest.yml by running

        docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest \\\n  generate docker full \\\n  --output harvest-compose.yml \\\n  --promPort 9091\n
      3. Bring everything up

        docker-compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans\n

      After starting the containers, you can view them with podman ps -a or by using Cockpit at https://host-ip:9090/podman.

      podman ps -a\nCONTAINER ID  IMAGE                                   COMMAND               CREATED        STATUS            PORTS                     NAMES\n45fd00307d0a  ghcr.io/netapp/harvest:latest           --poller unix --p...  5 seconds ago  Up 5 seconds ago  0.0.0.0:12990->12990/tcp  poller_unix_v21.11.0\nd40585bb903c  localhost/prom/prometheus:latest        --config.file=/et...  5 seconds ago  Up 5 seconds ago  0.0.0.0:9091->9090/tcp    prometheus\n17a2784bc282  localhost/grafana/grafana:latest                              4 seconds ago  Up 5 seconds ago  0.0.0.0:3000->3000/tcp    grafana\n
      "},{"location":"install/podman/#troubleshooting","title":"Troubleshooting","text":"

      Check Podman's troubleshooting docs

      "},{"location":"install/podman/#nothing-works","title":"Nothing works","text":"

      Make sure the DOCKER_HOST env variable is set and that this curl works.

      sudo curl -H \"Content-Type: application/json\" --unix-socket /var/run/docker.sock http://localhost/_ping\n

      Make sure your containers can talk to each other.

      ping prometheus\nPING prometheus (10.88.2.3): 56 data bytes\n64 bytes from 10.88.2.3: seq=0 ttl=42 time=0.059 ms\n64 bytes from 10.88.2.3: seq=1 ttl=42 time=0.065 ms\n
      "},{"location":"install/podman/#errors-after-rebooting","title":"Errors after rebooting","text":"

      After restarting the machine, you may see errors like the following when running podman ps.

      podman ps -a\nERRO[0000] error joining network namespace for container 424df6c: error retrieving network namespace at /run/user/1001/netns/cni-5fb97adc-b6ef-17e8-565b-0481b311ba09: failed to Statfs \"/run/user/1001/netns/cni-5fb97adc-b6ef-17e8-565b-0481b311ba09\": no such file or directory\n

      Run podman info and make sure runRoot points to /run/user/$UID/containers (see below). If it instead points to /tmp/podman-run-$UID you will likely have problems when restarting the machine. Typically, this happens because you used su to become the harvest user or ran podman as root. You can fix this by logging in as the harvest user and running podman system reset.

      podman info | grep runRoot\n  runRoot: /run/user/1001/containers\n
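      A minimal sketch of that fix; note that podman system reset removes all of the user's containers, images, and volumes, so the Harvest containers will need to be recreated afterwards:

      # run as the harvest user, not as root\npodman system reset\n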
      "},{"location":"install/podman/#linger-errors","title":"Linger errors","text":"

      When you log out, systemd may remove some temporary files and tear down Podman's rootless network. The workaround is to run the following command as the harvest user. Details here.

      loginctl enable-linger\n
      "},{"location":"install/podman/#versions","title":"Versions","text":"

      The following versions were used to validate this workflow.

      podman version\n\nVersion:      3.2.3\nAPI Version:  3.2.3\nGo Version:   go1.15.7\nBuilt:        Thu Jul 29 11:02:43 2021\nOS/Arch:      linux/amd64\n\ndocker-compose -v\ndocker-compose version 1.29.2, build 5becea4c\n\ncat /etc/redhat-release\nRed Hat Enterprise Linux release 8.4 (Ootpa)\n
      "},{"location":"install/podman/#references","title":"References","text":"
      • https://github.com/containers/podman
      • https://www.redhat.com/sysadmin/sudo-rootless-podman
      • https://www.redhat.com/sysadmin/podman-docker-compose
      • https://fedoramagazine.org/use-docker-compose-with-podman-to-orchestrate-containers-on-fedora/
      • https://podman.io/getting-started/network.html mentions the need for podman-plugins; otherwise, rootless containers running as separate containers cannot see each other
      • Troubleshoot Podman
      "},{"location":"resources/ems-alert-runbook/","title":"EMS Alert Runbook","text":"

      This document describes each ONTAP event management system (EMS) event that Harvest collects, along with remediation steps.

      "},{"location":"resources/ems-alert-runbook/#aws-credentials-not-initialized","title":"AWS Credentials Not Initialized","text":"

      Impact: Availability

      EMS Event: cloud.aws.iamNotInitialized

      This event occurs when a module attempts to access Amazon Web Services (AWS) Identity and Access Management (IAM) role-based credentials from the cloud credentials thread before they are initialized.

      Remediation

      Wait for the cloud credential thread, as well as the system, to complete initialization.

      "},{"location":"resources/ems-alert-runbook/#antivirus-server-busy","title":"Antivirus Server Busy","text":"

      Impact: Availability

      EMS Event: Nblade.vscanConnBackPressure

      The antivirus server is too busy to accept any new scan requests.

      Remediation

      If this message occurs frequently, ensure that there are enough antivirus servers to handle the virus scan load generated by the SVM.

      "},{"location":"resources/ems-alert-runbook/#cloud-tier-unreachable","title":"Cloud Tier Unreachable","text":"

      Impact: Availability

      EMS Event: object.store.unavailable

      A storage node cannot connect to Cloud Tier object store API. Some data will be inaccessible.

      Remediation

      If you use on-premises products, perform the following corrective actions:

      1. Verify that your intercluster LIF is online and functional by using the \"network interface show\" command.
      2. Check the network connectivity to the object store server by using the \"ping\" command over the destination node intercluster LIF.
      3. Ensure the following: a. The configuration of your object store has not changed. b. The login and connectivity information is still valid. Contact NetApp technical support if the issue persists.

      If you use Cloud Volumes ONTAP, perform the following corrective actions:

      1. Ensure that the configuration of your object store has not changed.
      2. Ensure that the login and connectivity information is still valid. Contact NetApp technical support if the issue persists.
      "},{"location":"resources/ems-alert-runbook/#disk-out-of-service","title":"Disk Out of Service","text":"

      Impact: Availability

      EMS Event: disk.outOfService

      This event occurs when a disk is removed from service because it has been marked failed, is being sanitized, or has entered the Maintenance Center.

      "},{"location":"resources/ems-alert-runbook/#disk-shelf-power-supply-discovered","title":"Disk Shelf Power Supply Discovered","text":"

      Impact: Configuration

      EMS Event: diskShelf.psu.added

      This message occurs when a power supply unit is added to the disk shelf.

      "},{"location":"resources/ems-alert-runbook/#disk-shelves-power-supply-removed","title":"Disk Shelves Power Supply Removed","text":"

      Impact: Availability

      EMS Event: diskShelf.psu.removed

      This message occurs when a power supply unit is removed from the disk shelf.

      "},{"location":"resources/ems-alert-runbook/#fc-target-port-commands-exceeded","title":"FC Target Port Commands Exceeded","text":"

      Impact: Availability

      EMS Event: scsitarget.fct.port.full

      The number of outstanding commands on the physical FC target port exceeds the supported limit. The port does not have sufficient buffers for the outstanding commands. It is overrun or the fan-in is too steep because too many initiator I/Os are using it.

      Remediation

      Perform the following corrective actions:

      1. Evaluate the host fan-in on the port, and perform one of the following actions: a. Reduce the number of hosts that log in to this port. b. Reduce the number of LUNs accessed by the hosts that log in to this port. c. Reduce the host command queue depth.
      2. Monitor the \"queue_full\" counter on the \"fcp_port\" CM object, and ensure that it does not increase. For example: statistics show -object fcp_port -counter queue_full -instance port.portname -raw
      3. Monitor the threshold counter and ensure that it does not increase. For example: statistics show -object fcp_port -counter threshold_full -instance port.portname -raw
      "},{"location":"resources/ems-alert-runbook/#fabricpool-mirror-replication-resync-completed","title":"FabricPool Mirror Replication Resync Completed","text":"

      Impact: Capacity

      EMS Event: wafl.ca.resync.complete

      This message occurs when Data ONTAP(R) completes the resync process from the primary object store to the mirror object store for a mirrored FabricPool aggregate.

      "},{"location":"resources/ems-alert-runbook/#fabricpool-space-usage-limit-nearly-reached","title":"FabricPool Space Usage Limit Nearly Reached","text":"

      Impact: Capacity

      EMS Event: fabricpool.nearly.full

      The total cluster-wide FabricPool space usage of object stores from capacity-licensed providers has nearly reached the licensed limit.

      Remediation

      Perform the following corrective actions:

      1. Check the percentage of the licensed capacity used by each FabricPool storage tier by using the \"storage aggregate object-store show-space\" command.
      2. Delete Snapshot copies from volumes with the tiering policy \"snapshot\" or \"backup\" by using the \"volume snapshot delete\" command to clear up space.
      3. Install a new license on the cluster to increase the licensed capacity.
      "},{"location":"resources/ems-alert-runbook/#fabricpool-space-usage-limit-reached","title":"FabricPool Space Usage Limit Reached","text":"

      Impact: Capacity

      EMS Event: fabricpool.full

      The total cluster-wide FabricPool space usage of object stores from capacity-licensed providers has reached the license limit.

      Remediation

      Perform the following corrective actions:

      1. Check the percentage of the licensed capacity used by each FabricPool storage tier by using the \"storage aggregate object-store show-space\" command.
      2. Delete Snapshot copies from volumes with the tiering policy \"snapshot\" or \"backup\" by using the \"volume snapshot delete\" command to clear up space.
      3. Install a new license on the cluster to increase the licensed capacity.
      "},{"location":"resources/ems-alert-runbook/#fanout-snapmirror-relationship-common-snapshot-deleted","title":"Fanout SnapMirror Relationship Common Snapshot Deleted","text":"

      Impact: Protection

      EMS Event: sms.fanout.comm.snap.deleted

      This message occurs when an older Snapshot(tm) copy is deleted as part of a SnapMirror\u00ae Synchronous resynchronize or update (common Snapshot copy) operation, which could lead to a \"no common Snapshot scenario\" between the synchronous and asynchronous disaster recovery (DR) copies that share the same source volume. If there is no common Snapshot copy between the synchronous and asynchronous DR copies, then a re-baseline will need to be performed during a disaster recovery.

      Remediation

      You can ignore this message if there is no asynchronous relationship configured for the synchronous source volume. If there is an asynchronous relationship configured, then update the asynchronous relationship by using the \"snapmirror update\" command. The SnapMirror update operation will transfer the snapshots that will act as common snapshots between the synchronous and asynchronous destinations.
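      A sketch of that update; the destination path is a placeholder for your asynchronous DR destination:

      # <async_destination_vserver:volume> is a placeholder\nsnapmirror update -destination-path <async_destination_vserver:volume>\n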

      "},{"location":"resources/ems-alert-runbook/#giveback-of-storage-pool-failed","title":"Giveback of Storage Pool Failed","text":"

      Impact: Availability

      EMS Event: gb.netra.ca.check.failed

      This event occurs during the migration of a storage pool (aggregate) as part of a storage failover (SFO) giveback, when the destination node cannot reach the object stores.

      Remediation

      Perform the following corrective actions:

      1. Verify that your intercluster LIF is online and functional by using the \"network interface show\" command.
      2. Check network connectivity to the object store server by using the \"ping\" command over the destination node intercluster LIF.
      3. Verify that the configuration of your object store has not changed and that login and connectivity information is still accurate by using the \"aggregate object-store config show\" command.

      Alternatively, you can override the error by specifying false for the \"require-partner-waiting\" parameter of the giveback command.

      Contact NetApp technical support for more information or assistance.

      "},{"location":"resources/ems-alert-runbook/#ha-interconnect-down","title":"HA Interconnect Down","text":"

      Impact: Availability

      EMS Event: callhome.hainterconnect.down

      The high-availability (HA) interconnect is down. Risk of service outage when failover is not available.

      Remediation

      Corrective actions depend on the number and type of HA interconnect links supported by the platform, as well as the reason why the interconnect is down.

      • If the links are down:
        • Verify that both controllers in the HA pair are operational.
        • For externally connected links, make sure that the interconnect cables are connected properly and that the small form-factor pluggables (SFPs), if applicable, are seated properly on both controllers.
        • For internally connected links, disable and re-enable the links, one after the other, by using the \"ic link off\" and \"ic link on\" commands.
      • If links are disabled, enable the links by using the \"ic link on\" command.
      • If a peer is not connected, disable and re-enable the links, one after the other, by using the \"ic link off\" and \"ic link on\" commands.

      Contact NetApp technical support if the issue persists.

      "},{"location":"resources/ems-alert-runbook/#lun-destroyed","title":"LUN Destroyed","text":"

      Impact: Availability

      EMS Event: LUN.destroy

      This event occurs when a LUN is destroyed.

      "},{"location":"resources/ems-alert-runbook/#lun-offline","title":"LUN Offline","text":"

      Impact: Availability

      EMS Event: LUN.offline

      This message occurs when a LUN is brought offline manually.

      Remediation

      Bring the LUN back online.

      "},{"location":"resources/ems-alert-runbook/#main-unit-fan-failed","title":"Main Unit Fan Failed","text":"

      Impact: Availability

      EMS Event: monitor.fan.failed

      One or more main unit fans have failed. The system remains operational.

      However, if the condition persists for too long, the overtemperature might trigger an automatic shutdown.

      Remediation

      Reseat the failed fans. If the error persists, replace them.

      "},{"location":"resources/ems-alert-runbook/#main-unit-fan-in-warning-state","title":"Main Unit Fan in Warning State","text":"

      Impact: Availability

      EMS Event: monitor.fan.warning

      This event occurs when one or more main unit fans are in a warning state.

      Remediation

      Replace the indicated fans to avoid overheating.

      "},{"location":"resources/ems-alert-runbook/#max-sessions-per-user-exceeded","title":"Max Sessions Per User Exceeded","text":"

      Impact: Availability

      EMS Event: Nblade.cifsMaxSessPerUsrConn

      You have exceeded the maximum number of sessions allowed per user over a TCP connection. Any request to establish a session will be denied until some sessions are released.

      Remediation

      Perform the following corrective actions:

      1. Inspect all the applications that run on the client, and terminate any that are not operating properly.
      2. Reboot the client.
      3. Check if the issue is caused by a new or existing application: a. If the application is new, set a higher threshold for the client by using the \"cifs option modify -max-opens-same-file-per-tree\" command. In some cases, clients operate as expected, but require a higher threshold. You should have advanced privilege to set a higher threshold for the client. b. If the issue is caused by an existing application, there might be an issue with the client. Contact NetApp technical support for more information or assistance.
      "},{"location":"resources/ems-alert-runbook/#max-times-open-per-file-exceeded","title":"Max Times Open Per File Exceeded","text":"

      Impact: Availability

      EMS Event: Nblade.cifsMaxOpenSameFile

      You have exceeded the maximum number of times that you can open the file over a TCP connection. Any request to open this file will be denied until you close some open instances of the file. This typically indicates abnormal application behavior.

      Remediation

      Perform the following corrective actions:

      1. Inspect the applications that run on the client using this TCP connection. The client might be operating incorrectly because of the application running on it.
      2. Reboot the client.
      3. Check if the issue is caused by a new or existing application: a. If the application is new, set a higher threshold for the client by using the \"cifs option modify -max-opens-same-file-per-tree\" command. In some cases, clients operate as expected, but require a higher threshold. You should have advanced privilege to set a higher threshold for the client. b. If the issue is caused by an existing application, there might be an issue with the client. Contact NetApp technical support for more information or assistance.
      "},{"location":"resources/ems-alert-runbook/#metrocluster-automatic-unplanned-switchover-disabled","title":"MetroCluster Automatic Unplanned Switchover Disabled","text":"

      Impact: Availability

      EMS Event: mcc.config.auso.stDisabled

      This message occurs when automatic unplanned switchover capability is disabled.

      Remediation

      Run the \"metrocluster modify -node-name -automatic-switchover-onfailure true\" command for each node in the cluster to enable automatic switchover."},{"location":"resources/ems-alert-runbook/#metrocluster-monitoring","title":"MetroCluster Monitoring","text":"

      Impact: Availability

      EMS Event: hm.alert.raised

      Aggregate was left behind during switchback.

      Remediation

      1. Check the aggregate state by using the command \"aggr show\".
      2. If the aggregate is online, return it to its original owner by using the command \"metrocluster switchback\".

      "},{"location":"resources/ems-alert-runbook/#nfsv4-store-pool-exhausted","title":"NFSv4 Store Pool Exhausted","text":"

      Impact: Availability

      EMS Event: Nblade.nfsV4PoolExhaust

      An NFSv4 store pool has been exhausted.

      Remediation

      If the NFS server is unresponsive for more than 10 minutes after this event, contact NetApp technical support.

      "},{"location":"resources/ems-alert-runbook/#nvme-namespace-destroyed","title":"NVMe Namespace Destroyed","text":"

      Impact: Availability

      EMS Event: NVMeNS.destroy

      This event occurs when an NVMe namespace is destroyed.

      "},{"location":"resources/ems-alert-runbook/#nvme-namespace-offline","title":"NVMe Namespace Offline","text":"

      Impact: Availability

      EMS Event: NVMeNS.offline

      This event occurs when an NVMe namespace is brought offline manually.

      "},{"location":"resources/ems-alert-runbook/#nvme-namespace-online","title":"NVMe Namespace Online","text":"

      Impact: Availability

      EMS Event: NVMeNS.online

      This event occurs when an NVMe namespace is brought online manually.

      "},{"location":"resources/ems-alert-runbook/#nvme-of-license-grace-period-active","title":"NVMe-oF License Grace Period Active","text":"

      Impact: Availability

      EMS Event: nvmf.graceperiod.active

      This event occurs on a daily basis when the NVMe over Fabrics (NVMe-oF) protocol is in use and the grace period of the license is active. The NVMe-oF functionality requires a license after the license grace period expires. NVMe-oF functionality is disabled when the license grace period is over.

      Remediation

      Contact your sales representative to obtain an NVMe-oF license, and add it to the cluster, or remove all instances of NVMe-oF configuration from the cluster.

      "},{"location":"resources/ems-alert-runbook/#nvme-of-license-grace-period-expired","title":"NVMe-oF License Grace Period Expired","text":"

      Impact: Availability

      EMS Event: nvmf.graceperiod.expired

      The NVMe over Fabrics (NVMe-oF) license grace period is over and the NVMe-oF functionality is disabled.

      Remediation

      Contact your sales representative to obtain an NVMe-oF license, and add it to the cluster.

      "},{"location":"resources/ems-alert-runbook/#nvme-of-license-grace-period-start","title":"NVMe-oF License Grace Period Start","text":"

      Impact: Availability

      EMS Event: nvmf.graceperiod.start

      The NVMe over Fabrics (NVMe-oF) configuration was detected during the upgrade to ONTAP 9.5 software. NVMe-oF functionality requires a license after the license grace period expires.

      Remediation

      Contact your sales representative to obtain an NVMe-oF license, and add it to the cluster.

      "},{"location":"resources/ems-alert-runbook/#nvram-battery-low","title":"NVRAM Battery Low","text":"

      Impact: Availability

      EMS Event: callhome.battery.low

      The NVRAM battery capacity is critically low. There might be a potential data loss if the battery runs out of power.

      Your system generates and transmits an AutoSupport or \"call home\" message to NetApp technical support and the configured destinations if it is configured to do so. The successful delivery of an AutoSupport message significantly improves problem determination and resolution.

      Remediation

      Perform the following corrective actions:

      1. View the battery's current status, capacity, and charging state by using the \"system node environment sensors show\" command.
      2. If the battery was replaced recently or the system was non-operational for an extended period of time, monitor the battery to verify that it is charging properly.
      3. Contact NetApp technical support if the battery runtime continues to decrease below critical levels, and the storage system shuts down automatically.
      "},{"location":"resources/ems-alert-runbook/#netbios-name-conflict","title":"NetBIOS Name Conflict","text":"

      Impact: Availability

      EMS Event: Nblade.cifsNbNameConflict

      The NetBIOS Name Service has received a negative response to a name registration request from a remote machine. This is typically caused by a conflict in the NetBIOS name or an alias. As a result, clients might not be able to access data or connect to the right data-serving node in the cluster.

      Remediation

      Perform any one of the following corrective actions:

      • If there is a conflict in the NetBIOS name or an alias, perform one of the following:
        • Delete the duplicate NetBIOS alias by using the \"vserver cifs delete -aliases alias -vserver vserver\" command.
        • Rename a NetBIOS alias by deleting the duplicate name and adding an alias with a new name by using the \"vserver cifs create -aliases alias -vserver vserver\" command.
      • If there are no aliases configured and there is a conflict in the NetBIOS name, then rename the CIFS server by using the \"vserver cifs delete -vserver vserver\" and \"vserver cifs create -cifs-server netbiosname\" commands. NOTE: Deleting a CIFS server can make data inaccessible.
      • Remove the NetBIOS name or rename the NetBIOS name on the remote machine.
      "},{"location":"resources/ems-alert-runbook/#no-registered-scan-engine","title":"No Registered Scan Engine","text":"

      Impact: Availability

      EMS Event: Nblade.vscanNoRegdScanner

      The antivirus connector notified ONTAP that it does not have a registered scan engine. This might cause data unavailability if the \"scan-mandatory\" option is enabled.

      Remediation

      Perform the following corrective actions:

      1. Ensure that the scan engine software installed on the antivirus server is compatible with ONTAP.
      2. Ensure that scan engine software is running and configured to connect to the antivirus connector over local loopback.
      "},{"location":"resources/ems-alert-runbook/#no-vscan-connection","title":"No Vscan Connection","text":"

      Impact: Availability

      EMS Event: Nblade.vscanNoScannerConn

      ONTAP has no Vscan connection to service virus scan requests. This might cause data unavailability if the \"scan-mandatory\" option is enabled.

      Remediation

      Ensure that the scanner pool is properly configured and the antivirus servers are active and connected to ONTAP.

      "},{"location":"resources/ems-alert-runbook/#node-panic","title":"Node Panic","text":"

      Impact: Performance

      EMS Event: sk.panic

      This event is issued when a panic occurs.

      Remediation

      Contact NetApp customer support.

      "},{"location":"resources/ems-alert-runbook/#node-root-volume-space-low","title":"Node Root Volume Space Low","text":"

      Impact: Capacity

      EMS Event: mgmtgwd.rootvolrec.low.space

      The system has detected that the root volume is dangerously low on space. The node is not fully operational. Data LIFs might have failed over within the cluster, because of which NFS and CIFS access is limited on the node. Administrative capability is limited to local recovery procedures for the node to clear up space on the root volume.

      Remediation

      Perform the following corrective actions:

      1. Clear up space on the root volume by deleting old Snapshot copies, deleting files you no longer need from the /mroot directory, or expanding the root volume capacity.
      2. Reboot the controller.

      Contact NetApp technical support for more information or assistance.

      "},{"location":"resources/ems-alert-runbook/#non-responsive-antivirus-server","title":"Non-responsive AntiVirus Server","text":"

      Impact: Availability

      EMS Event: Nblade.vscanConnInactive

      This event occurs when ONTAP(R) detects a non-responsive antivirus (AV) server and forcibly closes its Vscan connection.

      Remediation

      Ensure that the AV server installed on the AV connector can connect to the Storage Virtual Machine (SVM) and receive the scan requests.

      "},{"location":"resources/ems-alert-runbook/#nonexistent-admin-share","title":"Nonexistent Admin Share","text":"

      Impact: Availability

      EMS Event: Nblade.cifsNoPrivShare

      Vscan issue: a client has attempted to connect to a nonexistent ONTAP_ADMIN$ share.

      Remediation

      Ensure that Vscan is enabled for the mentioned SVM ID. Enabling Vscan on an SVM causes the ONTAP_ADMIN$ share to be created for the SVM automatically.

      "},{"location":"resources/ems-alert-runbook/#ontap-mediator-added","title":"ONTAP Mediator Added","text":"

      Impact: Protection

      EMS Event: sm.mediator.added

      This message occurs when ONTAP Mediator is added successfully on a cluster.

      "},{"location":"resources/ems-alert-runbook/#ontap-mediator-ca-certificate-expired","title":"ONTAP Mediator CA Certificate Expired","text":"

      Impact: Protection

      EMS Event: sm.mediator.cacert.expired

      This message occurs when the ONTAP Mediator certificate authority (CA) certificate has expired. As a result, all further communication to the ONTAP Mediator will not be possible.

      Remediation

      Remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Update a new CA certificate on the ONTAP Mediator server. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.
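      A sketch of that sequence; the address, peer cluster, and username values are placeholders, and exact parameters may vary by ONTAP version:

      # the address, peer cluster, and user values are placeholders\nsnapmirror mediator remove -mediator-address <mediator_ip>\n# install the new CA certificate on the ONTAP Mediator server, then:\nsnapmirror mediator add -mediator-address <mediator_ip> -peer-cluster <peer_cluster> -username <mediator_username>\n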

      "},{"location":"resources/ems-alert-runbook/#ontap-mediator-ca-certificate-expiring","title":"ONTAP Mediator CA Certificate Expiring","text":"

      Impact: Protection

      EMS Event: sm.mediator.cacert.expiring

      This message occurs when the ONTAP Mediator certificate authority (CA) certificate is due to expire within the next 30 days.

      Remediation

      Before this certificate expires, remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Update a new CA certificate on the ONTAP Mediator server. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

      "},{"location":"resources/ems-alert-runbook/#ontap-mediator-client-certificate-expired","title":"ONTAP Mediator Client Certificate Expired","text":"

      Impact: Protection

      EMS Event: sm.mediator.clientc.expired

      This message occurs when the ONTAP Mediator client certificate has expired. As a result, all further communication to the ONTAP Mediator will not be possible.

      Remediation

      Remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

      "},{"location":"resources/ems-alert-runbook/#ontap-mediator-client-certificate-expiring","title":"ONTAP Mediator Client Certificate Expiring","text":"

      Impact: Protection

      EMS Event: sm.mediator.clientc.expiring

      This message occurs when the ONTAP Mediator client certificate is due to expire within the next 30 days.

      Remediation

      Before this certificate expires, remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

      "},{"location":"resources/ems-alert-runbook/#ontap-mediator-not-accessible","title":"ONTAP Mediator Not Accessible","text":"

      Impact: Protection

      EMS Event: sm.mediator.misconfigured

      This message occurs when either the ONTAP Mediator is repurposed or the Mediator package is no longer installed on the Mediator server. As a result, SnapMirror failover is not possible.

      Remediation

      Remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

      "},{"location":"resources/ems-alert-runbook/#ontap-mediator-removed","title":"ONTAP Mediator Removed","text":"

      Impact: Protection

      EMS Event: sm.mediator.removed

      This message occurs when ONTAP Mediator is removed successfully from a cluster.

      "},{"location":"resources/ems-alert-runbook/#ontap-mediator-server-certificate-expired","title":"ONTAP Mediator Server Certificate Expired","text":"

      Impact: Protection

      EMS Event: sm.mediator.serverc.expired

      This message occurs when the ONTAP Mediator server certificate has expired. As a result, all further communication to the ONTAP Mediator will not be possible.

      Remediation

      Remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Update a new server certificate on the ONTAP Mediator server. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

      "},{"location":"resources/ems-alert-runbook/#ontap-mediator-server-certificate-expiring","title":"ONTAP Mediator Server Certificate Expiring","text":"

      Impact: Protection

      EMS Event: sm.mediator.serverc.expiring

      This message occurs when the ONTAP Mediator server certificate is due to expire within the next 30 days.

      Remediation

      Before this certificate expires, remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Update a new server certificate on the ONTAP Mediator server. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

      "},{"location":"resources/ems-alert-runbook/#ontap-mediator-unreachable","title":"ONTAP Mediator Unreachable","text":"

      Impact: Protection

      EMS Event: sm.mediator.unreachable

      This message occurs when the ONTAP Mediator is unreachable on a cluster. As a result, SnapMirror failover is not possible.

      Remediation

      Check the network connectivity to the ONTAP Mediator by using the \"network ping\" and \"network traceroute\" commands. If the issue persists, remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

      "},{"location":"resources/ems-alert-runbook/#object-store-host-unresolvable","title":"Object Store Host Unresolvable","text":"

      Impact: Availability

      EMS Event: objstore.host.unresolvable

      The object store server host name cannot be resolved to an IP address. The object store client cannot communicate with the object-store server without resolving to an IP address. As a result, data might be inaccessible.

      Remediation

      Check the DNS configuration to verify that the host name is configured correctly with an IP address.

      "},{"location":"resources/ems-alert-runbook/#object-store-intercluster-lif-down","title":"Object Store Intercluster LIF Down","text":"

      Impact: Availability

      EMS Event: objstore.interclusterlifDown

      The object-store client cannot find an operational LIF to communicate with the object store server. The node will not allow object store client traffic until the intercluster LIF is operational. As a result, data might be inaccessible.

      Remediation

      Perform the following corrective actions:

      1. Check the intercluster LIF status by using the \"network interface show -role intercluster\" command.
      2. Verify that the intercluster LIF is configured correctly and operational.
      3. If an intercluster LIF is not configured, add it by using the \"network interface create -role intercluster\" command.
      "},{"location":"resources/ems-alert-runbook/#object-store-signature-mismatch","title":"Object Store Signature Mismatch","text":"

      Impact: Availability

      EMS Event: osc.signatureMismatch

      The request signature sent to the object store server does not match the signature calculated by the client. As a result, data might be inaccessible.

      Remediation

      Verify that the secret access key is configured correctly. If it is configured correctly, contact NetApp technical support for assistance.

      "},{"location":"resources/ems-alert-runbook/#qos-monitor-memory-maxed-out","title":"QoS Monitor Memory Maxed Out","text":"

      Impact: Capacity

      EMS Event: qos.monitor.memory.maxed

      This event occurs when a QoS subsystem's dynamic memory reaches its limit for the current platform hardware. As a result, some QoS features might operate in a limited capacity.

      Remediation

      Delete some active workloads or streams to free up memory. Use the \"statistics show -object workload -counter ops\" command to determine which workloads are active. Active workloads show non-zero ops. Then use the \"workload delete \" command multiple times to remove specific workloads. Alternatively, use the \"stream delete -workload *\" command to delete the associated streams from the active workload."},{"location":"resources/ems-alert-runbook/#readdir-timeout","title":"READDIR Timeout","text":"

      Impact: Availability

      EMS Event: wafl.readdir.expired

      A READDIR file operation has exceeded the timeout that it is allowed to run in WAFL. This can be because of very large or sparse directories. Corrective action is recommended.

      Remediation

      Perform the following corrective actions:

      1. Find information specific to recent directories that have had READDIR file operations expire by using the following 'diag' privilege nodeshell CLI command: wafl readdir notice show.
      2. Check if directories are indicated as sparse or not: a. If a directory is indicated as sparse, it is recommended that you copy the contents of the directory to a new directory to remove the sparseness of the directory file. b. If a directory is not indicated as sparse and the directory is large, it is recommended that you reduce the size of the directory file by reducing the number of file entries in the directory.
      "},{"location":"resources/ems-alert-runbook/#ransomware-activity-detected","title":"Ransomware Activity Detected","text":"

      Impact: Security

      EMS Event: callhome.arw.activity.seen

      To protect the data from the detected ransomware, a Snapshot copy has been taken that can be used to restore original data.

      Your system generates and transmits an AutoSupport or \"call home\" message to NetApp technical support and any configured destinations. The AutoSupport message improves problem determination and resolution.

      Remediation

      Refer to the anti-ransomware documentation to take remedial measures for ransomware activity. If you need assistance, contact NetApp technical support.

      "},{"location":"resources/ems-alert-runbook/#relocation-of-storage-pool-failed","title":"Relocation of Storage Pool Failed","text":"

      Impact: Availability

      EMS Event: arl.netra.ca.check.failed

      This event occurs during the relocation of a storage pool (aggregate), when the destination node cannot reach the object stores.

      Remediation

      Perform the following corrective actions:

      1. Verify that your intercluster LIF is online and functional by using the \"network interface show\" command.
      2. Check network connectivity to the object store server by using the \"ping\" command over the destination node intercluster LIF.
      3. Verify that the configuration of your object store has not changed and that login and connectivity information is still accurate by using the \"aggregate object-store config show\" command.

      Alternatively, you can override the error by using the \"override-destination-checks\" parameter of the relocation command.

      Contact NetApp technical support for more information or assistance.

      "},{"location":"resources/ems-alert-runbook/#san-active-active-state-changed","title":"SAN \"active-active\" State Changed","text":"

      Impact: Availability

      EMS Event: scsiblade.san.config.active

      The SAN pathing is no longer symmetric. Pathing should be symmetric only on ASA, because AFF and FAS are both asymmetric.

      Remediation

      Try to enable the \"active-active\" state. Contact customer support if the problem persists.

      "},{"location":"resources/ems-alert-runbook/#sfp-in-fc-target-adapter-receiving-low-power","title":"SFP in FC target adapter receiving low power","text":"

      Impact: Availability

      EMS Event: scsitarget.fct.sfpRxPowerLow

      This alert occurs when the power received (RX) by a small form-factor pluggable transceiver (SFP in FC target) is at a level below the defined threshold, which might indicate a failing or faulty part.

      Remediation

      Monitor the operating value. If it continues to decrease, then replace the SFP and/or the cables.

      "},{"location":"resources/ems-alert-runbook/#sfp-in-fc-target-adapter-transmitting-low-power","title":"SFP in FC target adapter transmitting low power","text":"

      Impact: Availability

      EMS Event: scsitarget.fct.sfpTxPowerLow

      This alert occurs when the power transmitted (TX) by a small form-factor pluggable transceiver (SFP in FC target) is at a level below the defined threshold, which might indicate a failing or faulty part.

      Remediation

      Monitor the operating value. If it continues to decrease, then replace the SFP and/or the cables.

      "},{"location":"resources/ems-alert-runbook/#service-processor-heartbeat-missed","title":"Service Processor Heartbeat Missed","text":"

      Impact: Availability

      EMS Event: callhome.sp.hbt.missed

      This message occurs when ONTAP does not receive an expected \"heartbeat\" signal from the Service Processor (SP). Along with this message, log files from SP will be sent out for debugging. ONTAP will reset the SP to attempt to restore communication. The SP will be unavailable for up to two minutes while it reboots.

      Remediation

      Contact NetApp technical support.

      "},{"location":"resources/ems-alert-runbook/#service-processor-heartbeat-stopped","title":"Service Processor Heartbeat Stopped","text":"

      Impact: Availability

      EMS Event: callhome.sp.hbt.stopped

      This message occurs when ONTAP is no longer receiving heartbeats from the Service Processor (SP). Depending on the hardware design, the system may continue to serve data or may determine to shut down to prevent data loss or hardware damage. The system continues to serve data, but because the SP might not be working, the system cannot send notifications of down appliances, boot errors, or Open Firmware (OFW) Power-On Self-Test (POST) errors. If your system is configured to do so, it generates and transmits an AutoSupport (or 'call home') message to NetApp technical support and to the configured destinations. Successful delivery of an AutoSupport message significantly improves problem determination and resolution.

      Remediation

      If the system has shut down, attempt a hard power cycle: pull the controller out from the chassis, push it back in, and then power on the system. Contact NetApp technical support if the problem persists after the power cycle, or for any other condition that may warrant attention.

      "},{"location":"resources/ems-alert-runbook/#service-processor-not-configured","title":"Service Processor Not Configured","text":"

      Impact: Availability

      EMS Event: sp.notConfigured

      This event occurs on a weekly basis, to remind you to configure the Service Processor (SP). The SP is a physical device that is incorporated into your system to provide remote access and remote management capabilities. You should configure the SP to use its full functionality.

      Remediation

      Perform the following corrective actions:

      1. Configure the SP by using the \"system service-processor network modify\" command.
      2. Optionally, obtain the MAC address of the SP by using the \"system service-processor network show\" command.
      3. Verify the SP network configuration by using the \"system service-processor network show\" command.
      4. Verify that the SP can send an AutoSupport email by using the \"system service-processor autosupport invoke\" command. NOTE: AutoSupport email hosts and recipients should be configured in ONTAP before you issue this command.
      "},{"location":"resources/ems-alert-runbook/#service-processor-offline","title":"Service Processor Offline","text":"

      Impact: Availability

      EMS Event: sp.ipmi.lost.shutdown

      ONTAP is no longer receiving heartbeats from the Service Processor (SP), even though all the SP recovery actions have been taken. ONTAP cannot monitor the health of the hardware without the SP.

      The system will shut down to prevent hardware damage and data loss. Set up a panic alert to be notified immediately if the SP goes offline.

      Remediation

      Power-cycle the system by performing the following actions:

      1. Pull the controller out from the chassis.
      2. Push the controller back in.
      3. Turn the controller back on. If the problem persists, replace the controller module.
      "},{"location":"resources/ems-alert-runbook/#shadow-copy-failed","title":"Shadow Copy Failed","text":"

      Impact: Availability

      EMS Event: cifs.shadowcopy.failure

      An operation of the Volume Shadow Copy Service (VSS), a Microsoft Server backup and restore service, has failed.

      Remediation

      Check the following using the information provided in the event message:

      • Is shadow copy configuration enabled?
      • Are the appropriate licenses installed?
      • On which shares is the shadow copy operation performed?
      • Is the share name correct?
      • Does the share path exist?
      • What are the states of the shadow copy set and its shadow copies?
      "},{"location":"resources/ems-alert-runbook/#shelf-fan-failed","title":"Shelf Fan Failed","text":"

      Impact: Availability

      EMS Event: ses.status.fanError

      The indicated cooling fan or fan module of the shelf has failed. The disks in the shelf might not receive enough cooling airflow, which might result in disk failure.

      Remediation

      Perform the following corrective actions:

      1. Verify that the fan module is fully seated and secured. NOTE: The fan is integrated into the power supply module in some disk shelves.
      2. If the issue persists, replace the fan module.
      3. If the issue still persists, contact NetApp technical support for assistance.
      "},{"location":"resources/ems-alert-runbook/#snapmirror-relationship-common-snapshot-failed","title":"SnapMirror Relationship Common Snapshot Failed","text":"

      Impact: Protection

      EMS Event: sms.common.snapshot.failed

      This message occurs when there is a failure in creating a common Snapshot(tm) copy. The SnapMirror\u00ae Sync relationship continues to be in \"in-sync\" status. The latest common Snapshot copy is used for recovery in case the relationship status changes to \"out-of-sync.\" The common Snapshot copy should be created at scheduled intervals to decrease the recovery time of \"out-of-sync\" relationships.

      Remediation

      Create a common snapshot manually by using the \"snapmirror update\" command at the destination.

      "},{"location":"resources/ems-alert-runbook/#snapmirror-relationship-initialization-failed","title":"SnapMirror Relationship Initialization Failed","text":"

      Impact: Protection

      EMS Event: smc.snapmir.init.fail

      This message occurs when a SnapMirror\u00ae 'initialize' command fails and no more retries will be attempted.

      Remediation

      Check the reason for the error, take action accordingly, and issue the command again.

      "},{"location":"resources/ems-alert-runbook/#snapmirror-relationship-out-of-sync","title":"SnapMirror Relationship Out of Sync","text":"

      Impact: Protection

      EMS Event: sms.status.out.of.sync

      This event occurs when a SnapMirror(R) Sync relationship status changes from \"in-sync\" to \"out-of-sync\". I/O restrictions are imposed on the source volume based on the mode of replication. Client read or write access to the volume is not allowed for relationships of the \"strict-sync-mirror\" policy type. Data protection is affected.

      Remediation

      Check the network connection between the source and destination volumes. Monitor the SnapMirror Sync relationship status using the \"snapmirror show\" command. \"Auto-resync\" attempts to bring the relationship back to the \"in-sync\" status.

      "},{"location":"resources/ems-alert-runbook/#snapmirror-relationship-resync-attempt-failed","title":"SnapMirror Relationship Resync Attempt Failed","text":"

      Impact: Protection

      EMS Event: sms.resync.attempt.failed

      This message occurs when a resynchronize operation between the source volume and destination volume fails. The SnapMirror\u00ae Sync relationship is in \"out-of-sync\" status. Data protection is impacted.

      Remediation

      Monitor SnapMirror Sync status using the \"snapmirror show\" command. If the auto-resync attempts fail, bring the relationship back to \"in-sync\" status manually by using the \"snapmirror resync\" command.

      "},{"location":"resources/ems-alert-runbook/#snapmirror-relationship-snapshot-is-not-replicated","title":"SnapMirror Relationship Snapshot is not Replicated","text":"

      Impact: Protection

      EMS Event: sms.snap.not.replicated

      This message occurs when a Snapshot(tm) copy for a SnapMirror\u00ae Synchronous relationship is not successfully replicated.

      Remediation

      No remediation is required. You can trigger another snapshot create request to create a Snapshot copy that exists on both the primary and the secondary sites.

      "},{"location":"resources/ems-alert-runbook/#snapmirror-active-sync-automatic-unplanned-failover-completed","title":"SnapMirror active sync Automatic Unplanned Failover Completed","text":"

      Impact: Protection

      EMS Event: smbc.aufo.completed

      This message occurs when the SnapMirror\u00ae active sync automatic unplanned failover operation completes.

      "},{"location":"resources/ems-alert-runbook/#snapmirror-active-sync-automatic-unplanned-failover-failed","title":"SnapMirror active sync Automatic Unplanned Failover Failed","text":"

      Impact: Protection

      EMS Event: smbc.aufo.failed

      This message occurs when the SnapMirror\u00ae active sync automatic unplanned failover operation fails.

      Remediation

      The automatic unplanned failover is retried internally. However, operations are suspended until the failover completes. If AUFO keeps failing and you want to continue servicing I/O, run \"snapmirror delete -destination-path destination_path\" followed by \"snapmirror break\" on the volumes. Doing so affects protection because the relationship is removed; you will need to re-establish the protection relationship afterwards.

      "},{"location":"resources/ems-alert-runbook/#snapmirror-active-sync-planned-failover-completed","title":"SnapMirror active sync Planned Failover Completed","text":"

      Impact: Protection

      EMS Event: smbc.pfo.completed

      This message occurs when the SnapMirror\u00ae active sync planned failover operation completes.

      "},{"location":"resources/ems-alert-runbook/#snapmirror-active-sync-planned-failover-failed","title":"SnapMirror active sync Planned Failover Failed","text":"

      Impact: Protection

      EMS Event: smbc.pfo.failed

      This message occurs when the SnapMirror\u00ae active sync planned failover operation fails.

      Remediation

      Determine the cause of the failure by using the \"snapmirror failover show -fields error-reason\" command. If the relationship is out-of-sync, wait until it is brought back to in-sync. Otherwise, address the error causing the planned failover failure and then retry the \"snapmirror failover start -destination-path destination_path\" command.

      "},{"location":"resources/ems-alert-runbook/#snapmirror-active-sync-relationship-out-of-sync","title":"SnapMirror active sync Relationship Out of Sync","text":"

      Impact: Protection

      EMS Event: sms.status.out.of.sync.cg

      This message occurs when a SnapMirror for Business Continuity (SMBC) relationship changes status from \"in-sync\" to \"out-of-sync\". Because of this, RPO=0 data protection is disrupted.

      Remediation

      Check the network connection between the source and destination volumes. Monitor the SMBC relationship status by using the \"snapmirror show\" command on the destination, and by using the \"snapmirror list-destinations\" command on the source. Auto-resync will attempt to bring the relationship back to \"in-sync\" status. If the resync fails, verify that all the nodes in the cluster are in quorum and are healthy.

      "},{"location":"resources/ems-alert-runbook/#storage-switch-power-supplies-failed","title":"Storage Switch Power Supplies Failed","text":"

      Impact: Availability

      EMS Event: cluster.switch.pwr.fail

      A power supply is missing from the cluster switch. Redundancy is reduced, and there is a risk of an outage if any further power failures occur.

      Remediation

      Perform the following corrective actions:

      1. Ensure that the power supply mains, which supplies power to the cluster switch, is turned on.
      2. Ensure that the power cord is connected to the power supply.

      Contact NetApp technical support if the issue persists.

      "},{"location":"resources/ems-alert-runbook/#storage-vm-anti-ransomware-monitoring","title":"Storage VM Anti-ransomware Monitoring","text":"

      Impact: Security

      EMS Event: arw.vserver.state

      The anti-ransomware monitoring for the storage VM is disabled.

      Remediation

      Enable anti-ransomware to protect the storage VM.

      "},{"location":"resources/ems-alert-runbook/#storage-vm-stop-succeeded","title":"Storage VM Stop Succeeded","text":"

      Impact: Availability

      EMS Event: vserver.stop.succeeded

      This message occurs when a 'vserver stop' operation succeeds.

      Remediation

      Use the 'vserver start' command to start data access on a storage VM.

      "},{"location":"resources/ems-alert-runbook/#system-cannot-operate-due-to-main-unit-fan-failure","title":"System Cannot Operate Due to Main Unit Fan Failure","text":"

      Impact: Availability

      EMS Event: monitor.fan.critical

      One or more main unit fans have failed, disrupting system operation. This might lead to potential data loss.

      Remediation

      Replace the failed fans.

      "},{"location":"resources/ems-alert-runbook/#too-many-cifs-authentication","title":"Too Many CIFS Authentication","text":"

      Impact: Availability

      EMS Event: Nblade.cifsManyAuths

      Many authentication negotiations have occurred simultaneously. There are 256 incomplete new session requests from this client.

      Remediation

      Investigate why the client has created 256 or more new connection requests. You might have to contact the vendor of the client or of the application to determine why the error occurred.

      "},{"location":"resources/ems-alert-runbook/#unassigned-disks","title":"Unassigned Disks","text":"

      Impact: Availability

      EMS Event: unowned.disk.reminder

      The system has unassigned disks. Capacity is being wasted, and your system might have a misconfiguration or a partial configuration change applied.

      Remediation

      Perform the following corrective actions:

      1. Determine which disks are unassigned by using the \"disk show -n\" command.
      2. Assign the disks to a system by using the \"disk assign\" command.
      "},{"location":"resources/ems-alert-runbook/#unauthorized-user-access-to-admin-share","title":"Unauthorized User Access to Admin Share","text":"

      Impact: Security

      EMS Event: Nblade.vscanBadUserPrivAccess

      A client has attempted to connect to the privileged ONTAP_ADMIN$ share even though their logged-in user is not an allowed user.

      Remediation

      Perform the following corrective actions:

      1. Ensure that the mentioned username and IP address are configured in one of the active Vscan scanner pools.
      2. Check the scanner pool configuration that is currently active by using the \"vserver vscan scanner pool show-active\" command.
      "},{"location":"resources/ems-alert-runbook/#virus-detected","title":"Virus Detected","text":"

      Impact: Availability

      EMS Event: Nblade.vscanVirusDetected

      A Vscan server has reported an error to the storage system. This typically indicates that a virus has been found. However, other errors on the Vscan server can cause this event.

      Client access to the file is denied. The Vscan server might, depending on its settings and configuration, clean the file, quarantine it, or delete it.

      Remediation

      Check the log of the Vscan server reported in the \"syslog\" event to see if it was able to successfully clean, quarantine, or delete the infected file. If it was not able to do so, a system administrator might have to manually delete the file.

      "},{"location":"resources/ems-alert-runbook/#volume-anti-ransomware-monitoring","title":"Volume Anti-ransomware Monitoring","text":"

      Impact: Security

      EMS Event: arw.volume.state

      The anti-ransomware monitoring for the volume is disabled.

      Remediation

      Enable anti-ransomware to protect the volume.

      "},{"location":"resources/ems-alert-runbook/#volume-automatic-resizing-succeeded","title":"Volume Automatic Resizing Succeeded","text":"

      Impact: Capacity

      EMS Event: wafl.vol.autoSize.done

      This event occurs when the automatic resizing of a volume is successful. It happens when the \"autosize grow\" option is enabled, and the volume reaches the grow threshold percentage.

      "},{"location":"resources/ems-alert-runbook/#volume-offline","title":"Volume Offline","text":"

      Impact: Availability

      EMS Event: wafl.vvol.offline

      This message indicates that a volume has been taken offline.

      Remediation

      Bring the volume back online.

      "},{"location":"resources/ems-alert-runbook/#volume-restricted","title":"Volume Restricted","text":"

      Impact: Availability

      EMS Event: wafl.vvol.restrict

      This event indicates that a flexible volume has been restricted.

      Remediation

      Bring the volume back online.

      "},{"location":"resources/matrix/","title":"Matrix","text":""},{"location":"resources/matrix/#matrix","title":"Matrix","text":"

      The \u2133atri\u03c7 package provides the matrix.Matrix data-structure for storage, manipulation and transmission of both numeric and non-numeric (string) data. It is utilized by core components of Harvest, including collectors, plugins and exporters. It furthermore serves as an interface between these components, such that \"the left hand does not know what the right hand does\".

      Internally, the Matrix is a collection of metrics (matrix.Metric) and instances (matrix.Instance) in the form of a 2-dimensional array.

      Since we use hash tables for accessing the elements of the array, all metrics and instances added to the matrix must have a unique key. Metrics are typed and contain the numeric data (i.e. rows) of the Matrix. Instances only serve as pointers to the columns of the Matrix, but they also store non-numeric data as labels (*dict.Dict).
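
      To make that layout concrete, here is a minimal, hypothetical sketch of the idea using plain maps (not the actual matrix.Matrix implementation): metrics are rows keyed by metric name, instances are columns keyed by instance name, and labels hang off the instances.

      // Hypothetical sketch of the metric-by-instance layout; not the real matrix.Matrix code.\npackage main\n\nimport \"fmt\"\n\ntype sketch struct {\n    values map[string]map[string]float64 // metric key -> instance key -> value\n    labels map[string]map[string]string  // instance key -> label key -> label value\n}\n\nfunc main() {\n    m := sketch{\n        values: map[string]map[string]float64{\"max_speed\": {\"SomeCarMark\": 500}},\n        labels: map[string]map[string]string{\"SomeCarMark\": {\"color\": \"red\"}},\n    }\n    fmt.Println(m.values[\"max_speed\"][\"SomeCarMark\"]) // 500\n}\n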

      This package is the architectural backbone of Harvest, therefore understanding it is key for an advanced user or contributor.

      "},{"location":"resources/matrix/#basic-usage","title":"Basic Usage","text":""},{"location":"resources/matrix/#initialize","title":"Initialize","text":"

      func matrix.New(name, object string, identifier string) *Matrix\n// always succeeds and returns a pointer to a new (empty) Matrix\n
      This section describes how to properly initialize a new Matrix instance. Note that if you write a collector, a Matrix instance is already properly initialized for you (as MyCollector.matrix), and if you write a plugin or exporter, it is passed to you from the collector. That means most of the time you don't have to worry about initializing the Matrix.

      matrix.New() requires three arguments: * name is by convention the collector name (e.g. MyCollector) if the Matrix comes from a collector, or the collector name and the plugin name concatenated with a . (e.g. MyCollector.MyPlugin) if the Matrix comes from a plugin. * object is a description of the instances of the Matrix. For example, if we collect data about cars and our instances are cars, a good name would be car. * identifier is a unique key used to identify a matrix instance.

      Note that identifier should uniquely identify a Matrix instance. This is not a strict requirement, but guarantees that your data is properly handled by exporters.

      "},{"location":"resources/matrix/#example","title":"Example","text":"

      Here is an example from the point of view of a collector:

      import \"github.com/netapp/harvest/v2/pkg/matrix\"\n\nvar myMatrix *matrix.Matrix\n\nmyMatrix = matrix.New(\"CarCollector\", \"car\", \"car\")\n

      Next step is to add metrics and instances to our Matrix.

      "},{"location":"resources/matrix/#add-instances-and-instance-labels","title":"Add instances and instance labels","text":"
      func (x *Matrix) NewInstance(key string) (*Instance, error)\n// returns pointer to a new Instance, or nil with error (if key is not unique)\n

      func (i *Instance) SetLabel(key, value string)\n// always successful, overwrites existing values\n
      func (i *Instance) GetLabel(key string) string\n// always returns a value; if the label is not set, returns an empty string\n

      Once we have initialized a Matrix, we can add instances and add labels to our instances.

      "},{"location":"resources/matrix/#example_1","title":"Example","text":"
      var (\n    instance *matrix.Instance\n    err error\n)\nif instance, err = myMatrix.NewInstance(\"SomeCarMark\"); err != nil {\n    return err\n    // or handle err, but beware that instance is nil\n}\ninstance.SetLabel(\"mark\", \"SomeCarMark\")\ninstance.SetLabel(\"color\", \"red\")\ninstance.SetLabel(\"style\", \"coupe\")\n// add as many labels as you like\ninstance.GetLabel(\"color\") // returns \"red\"\ninstance.GetLabel(\"owner\") // returns \"\"\n
      "},{"location":"resources/matrix/#add-metrics","title":"Add Metrics","text":"
      func (x *Matrix) NewMetricInt64(key string) (Metric, error)\n// returns pointer to a new MetricInt64, or nil with error (if key is not unique)\n// note that Metric is an interface\n

      Metrics are typed and there are currently 8 types, all of which can be created with the same signature as above: * MetricUint8 * MetricUint32 * MetricUint64 * MetricInt * MetricInt32 * MetricInt64 * MetricFloat32 * MetricFloat64 We are able to read from and write to a metric instance using different types (as displayed in the next section); however, choosing a type wisely ensures that this is done efficiently and that overflow does not occur.

      We can add labels to metrics just like instances. This is usually done when we deal with histograms:

      func (m Metric) SetLabel(key, value string)\n// always successful, overwrites existing values\n
      func (m Metric) GetLabel(key string) string\n// always returns a value; if the label is not set, returns an empty string\n
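
      For example, here is a minimal sketch (continuing the car Matrix, with a hypothetical histogram metric key and bucket label) of adding labels to a metric:

      // hypothetical histogram bucket metric; the key and label values below are made up\nvar bucket matrix.Metric\nvar err error\n\nif bucket, err = myMatrix.NewMetricUint64(\"acceleration_hist.bucket0\"); err != nil {\n    return err\n}\nbucket.SetLabel(\"metric\", \"acceleration_hist\")\nbucket.SetLabel(\"bucket\", \"0-10s\")\nbucket.GetLabel(\"bucket\") // returns \"0-10s\"\n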

      "},{"location":"resources/matrix/#example_2","title":"Example","text":"

      Continuing our Matrix for collecting car-related data:

      var (\n    speed, length matrix.Metric\n    err error\n)\n\nif speed, err = myMatrix.NewMetricUint32(\"max_speed\"); err != nil {\n    return err\n}\nif length, err = myMatrix.NewMetricFloat32(\"length_in_mm\"); err != nil {\n    return err\n}\n
      "},{"location":"resources/matrix/#write-numeric-data","title":"Write numeric data","text":"

      func (x *Matrix) Reset()\n// flush numeric data from previous poll\n
      func (m Metric) SetValueInt64(i *Instance, v int64) error\nfunc (m Metric) SetValueUint8(i *Instance, v uint8) error\nfunc (m Metric) SetValueUint64(i *Instance, v uint64) error\nfunc (m Metric) SetValueFloat64(i *Instance, v float64) error\nfunc (m Metric) SetValueBytes(i *Instance, v []byte) error\nfunc (m Metric) SetValueString(i *Instance, v string) error\n// sets the numeric value for the instance i to v\n// returns error if v is invalid (explained below)\n
      func (m Metric) AddValueInt64(i *Instance, v int64) error\n// increments the numeric value for the instance i by v\n// same signatures for all the types defined above\n

      When possible you should reuse a Matrix for each data poll, but to do that, you need to call Reset() to drop old data from the Matrix. It is safe to add new instances and metrics after calling this method.

      The SetValue*() and AddValue*() methods are typed the same as the metrics. Even though you are not required to use the same type as the metric, it is the safest and most efficient way.

      Since most collectors get their data as bytes or strings, it is recommended to use the SetValueString() and SetValueBytes() methods.

      These methods return an error if the value v cannot be converted to the type of the metric. The error is always nil when the type of v matches the type of the metric.

      "},{"location":"resources/matrix/#example_3","title":"Example","text":"

      Continuing with the previous examples:

      myMatrix.Reset()\n// write numbers to the matrix using the instance and the metrics we have created\n\n// let the metric do the conversion for us\nif err = speed.SetValueString(instance, \"500\"); err != nil {\n    logger.Error(me.Prefix, \"set speed value: \", err)\n}\n// here we ignore err since type is the metric type\nlength.SetValueFloat64(instance, 10000.00)\n\n// safe to add new instances\nvar instance2 *matrix.Instance\nif instance2, err = myMatrix.NewInstance(\"SomeOtherCar\"); err != nil {\n    return err\n}\n\n// possible and safe even though length has type Float32\nif err = length.SetValueInt64(instance2, 13000); err != nil {\n    logger.Error(me.Prefix, \"set length value:\", err)\n}\n\n// possible, but will overflow since speed is unsigned\nif err = speed.SetValueInt64(instance2, -500); err != nil {\n    logger.Error(me.Prefix, \"set speed value:\", err)\n}\n
      "},{"location":"resources/matrix/#read-metrics-and-instances","title":"Read metrics and instances","text":"

      In this section we switch gears and look at the Matrix from the point of view of plugins and exporters. Both those components need to read from the Matrix and have no knowledge of its origin or contents.

      func (x *Matrix) GetMetrics() map[string]Metric\n// returns all metrics in the Matrix\n
      func (x *Matrix) GetInstances() map[string]*Instance\n// returns all instances in the Matrix\n

      Usually we will do a nested loop with these two methods to read all data in the Matrix. See examples below.

      "},{"location":"resources/matrix/#example-iterate-over-instances","title":"Example: Iterate over instances","text":"

      In this example, the function PrintKeys() iterates over a Matrix and prints all instance keys.

      func PrintKeys(x *matrix.Matrix) {\n    for instanceKey := range x.GetInstances() {\n        fmt.Println(\"instance key=\", instanceKey)\n    }\n}\n
      "},{"location":"resources/matrix/#example-read-instance-labels","title":"Example: Read instance labels","text":"

      Each instance has a set of labels. We can iterate over these labels with the GetLabel() and GetLabels() methods. In this example, we write a function that prints all labels of an instance:

      func PrintLabels(instance *matrix.Instance) {\n    for label, value := range instance.GetLabels().Map() {\n        fmt.Printf(\"%s=%s\\n\", label, value)\n    }\n}\n
      "},{"location":"resources/matrix/#example-read-metric-values-labels","title":"Example: Read metric values labels","text":"

      Similar to the SetValue* and AddValue* methods, you can choose a type when reading from a metric. If you don't know the type of the metric, it is safe to read it as a string. In this example, we write a function that prints the value of a metric for all instances in a Matrix:

      func PrintMetricValues(x *matrix.Matrix, m matrix.Metric) {\n    for key, instance := range x.GetInstances() {\n        if value, has := m.GetValueString(instance); has {\n            fmt.Printf(\"instance %s = %s\\n\", key, value)\n        } else {\n            fmt.Printf(\"instance %s has no value\\n\", key)\n        }\n    }\n}\n
      "},{"location":"resources/power-algorithm/","title":"Power Algorithm","text":"

      Gathering power metrics requires a cluster with:

      • ONTAP versions 9.6+
      • REST enabled, even when using the ZAPI collector. After granting REST permissions, restart Harvest.

      REST is required because it is the only way to collect chassis field-replaceable-unit (FRU) information via the REST API /api/private/cli/system/chassis/fru.

      "},{"location":"resources/power-algorithm/#how-does-harvest-calculate-cluster-power","title":"How does Harvest calculate cluster power?","text":"

      Cluster power is the sum of the power of a cluster's nodes plus the sum of the power of its attached disk shelves.

      Redundant power supplies (PSUs) share the total load. With n PSUs, each PSU does roughly 1/n of the work (the actual amount is slightly more than an even 1/n share because of the additional fans).

      "},{"location":"resources/power-algorithm/#node-power","title":"Node power","text":"

      Node power is calculated by collecting power supply unit (PSU) power, as reported by REST /api/private/cli/system/environment/sensors or by ZAPI environment-sensors-get-iter.

      When a power supply is shared between controllers, the PSU's power will be evenly divided across the controllers due to load-sharing.

      For example:

      • FAS2750 models have two power supplies that power both controllers. Each PSU is shared between the two controllers.
      • A800 models have four power supplies. PSU1 and PSU2 power Controller1 and PSU3 and PSU4 power Controller2. Each PSU provides power to a single controller.

      Harvest determines whether a PSU is shared between controllers by consulting the connected_nodes of each PSU, as reported by ONTAP via /api/private/cli/system/chassis/fru.
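
      As a rough sketch of that division (a hypothetical helper and inputs, not Harvest's actual code), assuming each PSU's input power in watts has already been derived from its sensors:

      // nodePower attributes PSU power to a single node.\n// psuWatts maps PSU name -> input power in watts for the PSUs powering this node;\n// connectedNodes maps PSU name -> number of nodes listed in that PSU's connected_nodes.\nfunc nodePower(psuWatts map[string]float64, connectedNodes map[string]int) float64 {\n    var watts float64\n    for psu, w := range psuWatts {\n        if n := connectedNodes[psu]; n > 0 {\n            watts += w / float64(n) // a shared PSU is split evenly across its nodes\n        }\n    }\n    return watts\n}\n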

      "},{"location":"resources/power-algorithm/#disk-shelf-power","title":"Disk shelf power","text":"

      Disk shelf power is calculated by collecting psu.power_drawn, as reported by REST via /api/storage/shelves, or sensor-reading, as reported by ZAPI via storage-shelf-info-get-iter.

      The power for embedded shelves is ignored, since that power is already accounted for in the controller's power draw.
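
      Putting the pieces together, a hedged sketch of the cluster total (hypothetical types and field names) could look like the following; node watts come from the per-node calculation above and shelf watts from psu.power_drawn:

      type shelf struct {\n    embedded   bool\n    powerDrawn []float64 // psu.power_drawn per shelf PSU, in watts\n}\n\n// clusterPower adds per-node power to the power drawn by external shelves.\n// Embedded shelves are skipped because their draw is already included in the\n// controllers' PSU readings.\nfunc clusterPower(nodeWatts []float64, shelves []shelf) float64 {\n    var total float64\n    for _, w := range nodeWatts {\n        total += w\n    }\n    for _, s := range shelves {\n        if s.embedded {\n            continue\n        }\n        for _, w := range s.powerDrawn {\n            total += w\n        }\n    }\n    return total\n}\n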

      "},{"location":"resources/power-algorithm/#examples","title":"Examples","text":""},{"location":"resources/power-algorithm/#fas2750","title":"FAS2750","text":"
      # Power Metrics for 10.61.183.200\n\n## ONTAP version NetApp Release 9.8P16: Fri Dec 02 02:05:05 UTC 2022\n\n## Nodes\nsystem show\n       Node         |  Model  | SerialNumber  \n----------------------+---------+---------------\ncie-na2750-g1344-01 | FAS2750 | 621841000123  \ncie-na2750-g1344-02 | FAS2750 | 621841000124\n\n## Chassis\nsystem chassis fru show\n ChassisId   |      Name       |         Fru         |    Type    | Status | NumNodes |              ConnectedNodes               \n---------------+-----------------+---------------------+------------+--------+----------+-------------------------------------------\n021827030435 | 621841000123    | cie-na2750-g1344-01 | controller | ok     |        1 | cie-na2750-g1344-01                       \n021827030435 | 621841000124    | cie-na2750-g1344-02 | controller | ok     |        1 | cie-na2750-g1344-02                       \n021827030435 | PSQ094182201794 | PSU2 FRU            | psu        | ok     |        2 | cie-na2750-g1344-02, cie-na2750-g1344-01  \n021827030435 | PSQ094182201797 | PSU1 FRU            | psu        | ok     |        2 | cie-na2750-g1344-02, cie-na2750-g1344-01\n\n## Sensors\nsystem environment sensors show\n(filtered by power, voltage, current)\n       Node         |     Name      |  Type   | State  | Value | Units  \n----------------------+---------------+---------+--------+-------+--------\ncie-na2750-g1344-01 | PSU1 12V Curr | current | normal |  9920 | mA     \ncie-na2750-g1344-01 | PSU1 12V      | voltage | normal | 12180 | mV     \ncie-na2750-g1344-01 | PSU1 5V Curr  | current | normal |  4490 | mA     \ncie-na2750-g1344-01 | PSU1 5V       | voltage | normal |  5110 | mV     \ncie-na2750-g1344-01 | PSU2 12V Curr | current | normal |  9140 | mA     \ncie-na2750-g1344-01 | PSU2 12V      | voltage | normal | 12100 | mV     \ncie-na2750-g1344-01 | PSU2 5V Curr  | current | normal |  4880 | mA     \ncie-na2750-g1344-01 | PSU2 5V       | voltage | normal |  5070 | mV     \ncie-na2750-g1344-02 | PSU1 12V Curr | current | normal |  9920 | mA     \ncie-na2750-g1344-02 | PSU1 12V      | voltage | normal | 12180 | mV     \ncie-na2750-g1344-02 | PSU1 5V Curr  | current | normal |  4330 | mA     \ncie-na2750-g1344-02 | PSU1 5V       | voltage | normal |  5110 | mV     \ncie-na2750-g1344-02 | PSU2 12V Curr | current | normal |  9170 | mA     \ncie-na2750-g1344-02 | PSU2 12V      | voltage | normal | 12100 | mV     \ncie-na2750-g1344-02 | PSU2 5V Curr  | current | normal |  4720 | mA     \ncie-na2750-g1344-02 | PSU2 5V       | voltage | normal |  5070 | mV\n\n## Shelf PSUs\nstorage shelf show\nShelf | ProductId | ModuleType | PSUId | PSUIsEnabled | PSUPowerDrawn | Embedded  \n------+-----------+------------+-------+--------------+---------------+---------\n  1.0 | DS224-12  | iom12e     | 1,2   | true,true    | 1397,1318     | true\n\n### Controller Power From Sum(InVoltage * InCurrent)/NumNodes\nPower: 256W\n
      "},{"location":"resources/power-algorithm/#aff-a800","title":"AFF A800","text":"
      # Power Metrics for 10.61.124.110\n\n## ONTAP version NetApp Release 9.13.1P1: Tue Jul 25 10:19:28 UTC 2023\n\n## Nodes\nsystem show\n  Node    |  Model   | SerialNumber  \n----------+----------+-------------\na800-1-01 | AFF-A800 | 941825000071  \na800-1-02 | AFF-A800 | 941825000072\n\n## Chassis\nsystem chassis fru show\n   ChassisId    |      Name      |    Fru    |    Type    | Status | NumNodes | ConnectedNodes  \n----------------+----------------+-----------+------------+--------+----------+---------------\nSHFFG1826000154 | 941825000071   | a800-1-01 | controller | ok     |        1 | a800-1-01       \nSHFFG1826000154 | 941825000072   | a800-1-02 | controller | ok     |        1 | a800-1-02       \nSHFFG1826000154 | EEQT1822002800 | PSU1 FRU  | psu        | ok     |        1 | a800-1-02       \nSHFFG1826000154 | EEQT1822002804 | PSU2 FRU  | psu        | ok     |        1 | a800-1-02       \nSHFFG1826000154 | EEQT1822002805 | PSU2 FRU  | psu        | ok     |        1 | a800-1-01       \nSHFFG1826000154 | EEQT1822002806 | PSU1 FRU  | psu        | ok     |        1 | a800-1-01\n\n## Sensors\nsystem environment sensors show\n(filtered by power, voltage, current)\n  Node    |     Name      |  Type   | State  | Value | Units  \n----------+---------------+---------+--------+-------+------\na800-1-01 | PSU1 Power In | unknown | normal |   376 | W      \na800-1-01 | PSU2 Power In | unknown | normal |   411 | W      \na800-1-02 | PSU1 Power In | unknown | normal |   383 | W      \na800-1-02 | PSU2 Power In | unknown | normal |   433 | W\n\n## Shelf PSUs\nstorage shelf show\nShelf |  ProductId  | ModuleType | PSUId | PSUIsEnabled | PSUPowerDrawn | Embedded  \n------+-------------+------------+-------+--------------+---------------+---------\n  1.0 | FS4483PSM3E | psm3e      |       |              |               | true      \n\n### Controller Power From Sum(InPower sensors)\nPower: 1603W\n
      "},{"location":"resources/rest-perf-metrics/","title":"REST Perf Metrics","text":"

      This document describes implementation details about ONTAP's REST performance metrics endpoints, including how we built the Harvest RESTPerf collectors.

      Warning

      These are implementation details about ONTAP's REST performance metrics. You do not need to understand any of this to use Harvest. If you want to know how to use or configure Harvest's REST collectors, check out the Rest Collector documentation instead. If you're interested in the gory details, read on.

      "},{"location":"resources/rest-perf-metrics/#introduction","title":"Introduction","text":"

      ONTAP REST metrics were introduced in ONTAP 9.11.1 and reached parity with Harvest-collected ZAPI performance metrics by ONTAP 9.12.1.

      "},{"location":"resources/rest-perf-metrics/#performance-rest-queries","title":"Performance REST queries","text":"

      Mapping table

      ZAPI REST Comment perf-object-counter-list-info /api/cluster/counter/tables returns counter tables and schemas perf-object-instance-list-info-iter /api/cluster/counter/tables/{name}/rows returns instances and counter values perf-object-get-instances /api/cluster/counter/tables/{name}/rows returns instances and counter values

      Performance REST responses include properties and counters. Counters are metric-like, while properties include instance attributes.
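
      For illustration, here is a minimal sketch of Go types that could decode one page of the rows response shown later in this document (the field names follow that JSON; this is not Harvest's actual parser):

      // Sketch only: matches the records/properties/counters shape of\n// /api/cluster/counter/tables/{name}/rows responses shown below.\ntype nameValue struct {\n    Name  string      `json:\"name\"`\n    Value interface{} `json:\"value\"` // properties are strings, counters are numbers\n}\n\ntype perfRow struct {\n    ID         string      `json:\"id\"`\n    Properties []nameValue `json:\"properties\"`\n    Counters   []nameValue `json:\"counters\"`\n}\n\ntype perfRowsPage struct {\n    Records    []perfRow `json:\"records\"`\n    NumRecords int       `json:\"num_records\"`\n}\n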

      "},{"location":"resources/rest-perf-metrics/#examples","title":"Examples","text":""},{"location":"resources/rest-perf-metrics/#ask-ontap-for-all-resources-that-report-performance-metrics","title":"Ask ONTAP for all resources that report performance metrics","text":"
      curl 'https://$clusterIP/api/cluster/counter/tables'\n
      Response

      {\n  \"records\": [\n    {\n      \"name\": \"copy_manager\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/copy_manager\"\n        }\n      }\n    },\n    {\n      \"name\": \"copy_manager:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/copy_manager%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"disk\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/disk\"\n        }\n      }\n    },\n    {\n      \"name\": \"disk:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/disk%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"disk:raid_group\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/disk%3Araid_group\"\n        }\n      }\n    },\n    {\n      \"name\": \"external_cache\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/external_cache\"\n        }\n      }\n    },\n    {\n      \"name\": \"fcp\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/fcp\"\n        }\n      }\n    },\n    {\n      \"name\": \"fcp:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/fcp%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"fcp_lif\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/fcp_lif\"\n        }\n      }\n    },\n    {\n      \"name\": \"fcp_lif:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/fcp_lif%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"fcp_lif:port\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/fcp_lif%3Aport\"\n        }\n      }\n    },\n    {\n      \"name\": \"fcp_lif:svm\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/fcp_lif%3Asvm\"\n        }\n      }\n    },\n    {\n      \"name\": \"fcvi\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/fcvi\"\n        }\n      }\n    },\n    {\n      \"name\": \"headroom_aggregate\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/headroom_aggregate\"\n        }\n      }\n    },\n    {\n      \"name\": \"headroom_cpu\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/headroom_cpu\"\n        }\n      }\n    },\n    {\n      \"name\": \"host_adapter\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/host_adapter\"\n        }\n      }\n    },\n    {\n      \"name\": \"iscsi_lif\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/iscsi_lif\"\n        }\n      }\n    },\n    {\n      \"name\": \"iscsi_lif:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/iscsi_lif%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"iscsi_lif:svm\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/iscsi_lif%3Asvm\"\n        }\n      }\n    },\n    {\n      \"name\": \"lif\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/lif\"\n        }\n 
     }\n    },\n    {\n      \"name\": \"lif:svm\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/lif%3Asvm\"\n        }\n      }\n    },\n    {\n      \"name\": \"lun\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/lun\"\n        }\n      }\n    },\n    {\n      \"name\": \"lun:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/lun%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"lun:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/lun%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"namespace\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/namespace\"\n        }\n      }\n    },\n    {\n      \"name\": \"namespace:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/namespace%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"nfs_v4_diag\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/nfs_v4_diag\"\n        }\n      }\n    },\n    {\n      \"name\": \"nic_common\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/nic_common\"\n        }\n      }\n    },\n    {\n      \"name\": \"nvmf_lif\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/nvmf_lif\"\n        }\n      }\n    },\n    {\n      \"name\": \"nvmf_lif:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/nvmf_lif%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"nvmf_lif:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/nvmf_lif%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"nvmf_lif:port\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/nvmf_lif%3Aport\"\n        }\n      }\n    },\n    {\n      \"name\": \"object_store_client_op\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/object_store_client_op\"\n        }\n      }\n    },\n    {\n      \"name\": \"path\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/path\"\n        }\n      }\n    },\n    {\n      \"name\": \"processor\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/processor\"\n        }\n      }\n    },\n    {\n      \"name\": \"processor:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/processor%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"qos\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qos\"\n        }\n      }\n    },\n    {\n      \"name\": \"qos:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qos%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"qos:policy_group\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qos%3Apolicy_group\"\n        }\n      }\n    },\n    {\n      \"name\": \"qos_detail\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qos_detail\"\n     
   }\n      }\n    },\n    {\n      \"name\": \"qos_detail_volume\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qos_detail_volume\"\n        }\n      }\n    },\n    {\n      \"name\": \"qos_volume\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qos_volume\"\n        }\n      }\n    },\n    {\n      \"name\": \"qos_volume:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qos_volume%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"qtree\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qtree\"\n        }\n      }\n    },\n    {\n      \"name\": \"qtree:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qtree%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_cifs\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_cifs\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_cifs:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_cifs%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_cifs:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_cifs%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v3\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v3\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v3:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v3%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v3:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v3%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v4\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v4\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v41\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v41\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v41:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v41%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v41:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v41%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v42\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v42\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v42:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v42%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v42:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v42%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v4:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v4%3Aconstituent\"\n        }\n      }\n    },\n  
  {\n      \"name\": \"svm_nfs_v4:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v4%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"system\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/system\"\n        }\n      }\n    },\n    {\n      \"name\": \"system:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/system%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"system:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/system%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"token_manager\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/token_manager\"\n        }\n      }\n    },\n    {\n      \"name\": \"volume\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/volume\"\n        }\n      }\n    },\n    {\n      \"name\": \"volume:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/volume%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"volume:svm\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/volume%3Asvm\"\n        }\n      }\n    },\n    {\n      \"name\": \"wafl\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/wafl\"\n        }\n      }\n    },\n    {\n      \"name\": \"wafl_comp_aggr_vol_bin\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/wafl_comp_aggr_vol_bin\"\n        }\n      }\n    },\n    {\n      \"name\": \"wafl_hya_per_aggregate\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/wafl_hya_per_aggregate\"\n        }\n      }\n    },\n    {\n      \"name\": \"wafl_hya_sizer\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/wafl_hya_sizer\"\n        }\n      }\n    }\n  ],\n  \"num_records\": 71,\n  \"_links\": {\n    \"self\": {\n      \"href\": \"/api/cluster/counter/tables/\"\n    }\n  }\n}\n

      "},{"location":"resources/rest-perf-metrics/#node-performance-metrics-metadata","title":"Node performance metrics metadata","text":"

      Ask ONTAP to return the schema for system:node. This will include the name, description, and metadata for all counters associated with system:node.

      curl 'https://$clusterIP/api/cluster/counter/tables/system:node?return_records=true'\n
      Response

      {\n  \"name\": \"system:node\",\n  \"description\": \"The System table reports general system activity. This includes global throughput for the main services, I/O latency, and CPU activity. The alias name for system:node is system_node.\",\n  \"counter_schemas\": [\n    {\n      \"name\": \"average_processor_busy_percent\",\n      \"description\": \"Average processor utilization across all processors in the system\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"cifs_ops\",\n      \"description\": \"Number of CIFS operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"cp\",\n      \"description\": \"CP time rate\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"cp_time\",\n      \"description\": \"Processor time in CP\",\n      \"type\": \"delta\",\n      \"unit\": \"microsec\"\n    },\n    {\n      \"name\": \"cpu_busy\",\n      \"description\": \"System CPU resource utilization. Returns a computed percentage for the default CPU field. Basically computes a 'cpu usage summary' value which indicates how 'busy' the system is based upon the most heavily utilized domain. The idea is to determine the amount of available CPU until we're limited by either a domain maxing out OR we exhaust all available idle CPU cycles, whichever occurs first.\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"cpu_elapsed_time\",\n      \"description\": \"Elapsed time since boot\",\n      \"type\": \"delta\",\n      \"unit\": \"microsec\"\n    },\n    {\n      \"name\": \"disk_data_read\",\n      \"description\": \"Number of disk kilobytes (KB) read per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"disk_data_written\",\n      \"description\": \"Number of disk kilobytes (KB) written per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"domain_busy\",\n      \"description\": \"Array of processor time in percentage spent in various domains\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"domain_shared\",\n      \"description\": \"Array of processor time in percentage spent in various shared domains\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"dswitchto_cnt\",\n      \"description\": \"Array of processor time in percentage spent in domain switch\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"fcp_data_received\",\n      \"description\": \"Number of FCP kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"fcp_data_sent\",\n      \"description\": \"Number of FCP kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"fcp_ops\",\n      \"description\": \"Number of FCP 
operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"hard_switches\",\n      \"description\": \"Number of context switches per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"hdd_data_read\",\n      \"description\": \"Number of HDD Disk kilobytes (KB) read per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"hdd_data_written\",\n      \"description\": \"Number of HDD kilobytes (KB) written per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"idle\",\n      \"description\": \"Processor idle rate percentage\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"idle_time\",\n      \"description\": \"Processor idle time\",\n      \"type\": \"delta\",\n      \"unit\": \"microsec\"\n    },\n    {\n      \"name\": \"instance_name\",\n      \"description\": \"Node name\",\n      \"type\": \"string\",\n      \"unit\": \"none\"\n    },\n    {\n      \"name\": \"interrupt\",\n      \"description\": \"Processor interrupt rate percentage\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"interrupt_in_cp\",\n      \"description\": \"Processor interrupt rate percentage\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cp_time\"\n      }\n    },\n    {\n      \"name\": \"interrupt_in_cp_time\",\n      \"description\": \"Processor interrupt in CP time\",\n      \"type\": \"delta\",\n      \"unit\": \"microsec\"\n    },\n    {\n      \"name\": \"interrupt_num\",\n      \"description\": \"Processor interrupt number\",\n      \"type\": \"delta\",\n      \"unit\": \"none\"\n    },\n    {\n      \"name\": \"interrupt_num_in_cp\",\n      \"description\": \"Number of processor interrupts in CP\",\n      \"type\": \"delta\",\n      \"unit\": \"none\"\n    },\n    {\n      \"name\": \"interrupt_time\",\n      \"description\": \"Processor interrupt time\",\n      \"type\": \"delta\",\n      \"unit\": \"microsec\"\n    },\n    {\n      \"name\": \"intr_cnt\",\n      \"description\": \"Array of interrupt count per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"intr_cnt_ipi\",\n      \"description\": \"IPI interrupt count per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"intr_cnt_msec\",\n      \"description\": \"Millisecond interrupt count per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"intr_cnt_total\",\n      \"description\": \"Total interrupt count per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"iscsi_data_received\",\n      \"description\": \"iSCSI kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"iscsi_data_sent\",\n      \"description\": \"iSCSI kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"iscsi_ops\",\n      \"description\": \"Number of iSCSI operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n   
 },\n    {\n      \"name\": \"memory\",\n      \"description\": \"Total memory in megabytes (MB)\",\n      \"type\": \"raw\",\n      \"unit\": \"none\"\n    },\n    {\n      \"name\": \"network_data_received\",\n      \"description\": \"Number of network kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"network_data_sent\",\n      \"description\": \"Number of network kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"nfs_ops\",\n      \"description\": \"Number of NFS operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"non_interrupt\",\n      \"description\": \"Processor non-interrupt rate percentage\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"non_interrupt_time\",\n      \"description\": \"Processor non-interrupt time\",\n      \"type\": \"delta\",\n      \"unit\": \"microsec\"\n    },\n    {\n      \"name\": \"num_processors\",\n      \"description\": \"Number of active processors in the system\",\n      \"type\": \"raw\",\n      \"unit\": \"none\"\n    },\n    {\n      \"name\": \"nvme_fc_data_received\",\n      \"description\": \"NVMe/FC kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"nvme_fc_data_sent\",\n      \"description\": \"NVMe/FC kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"nvme_fc_ops\",\n      \"description\": \"NVMe/FC operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"nvme_roce_data_received\",\n      \"description\": \"NVMe/RoCE kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"nvme_roce_data_sent\",\n      \"description\": \"NVMe/RoCE kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"nvme_roce_ops\",\n      \"description\": \"NVMe/RoCE operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"nvme_tcp_data_received\",\n      \"description\": \"NVMe/TCP kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"nvme_tcp_data_sent\",\n      \"description\": \"NVMe/TCP kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"nvme_tcp_ops\",\n      \"description\": \"NVMe/TCP operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"other_data\",\n      \"description\": \"Other throughput\",\n      \"type\": \"rate\",\n      \"unit\": \"b_per_sec\"\n    },\n    {\n      \"name\": \"other_latency\",\n      \"description\": \"Average latency for all other operations in the system in microseconds\",\n      \"type\": \"average\",\n      \"unit\": \"microsec\",\n      \"denominator\": {\n        \"name\": \"other_ops\"\n      }\n    },\n    {\n      \"name\": \"other_ops\",\n      \"description\": \"All other operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n     
 \"name\": \"partner_data_received\",\n      \"description\": \"SCSI Partner kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"partner_data_sent\",\n      \"description\": \"SCSI Partner kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"processor_plevel\",\n      \"description\": \"Processor plevel rate percentage\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"processor_plevel_time\",\n      \"description\": \"Processor plevel rate percentage\",\n      \"type\": \"delta\",\n      \"unit\": \"none\"\n    },\n    {\n      \"name\": \"read_data\",\n      \"description\": \"Read throughput\",\n      \"type\": \"rate\",\n      \"unit\": \"b_per_sec\"\n    },\n    {\n      \"name\": \"read_latency\",\n      \"description\": \"Average latency for all read operations in the system in microseconds\",\n      \"type\": \"average\",\n      \"unit\": \"microsec\",\n      \"denominator\": {\n        \"name\": \"read_ops\"\n      }\n    },\n    {\n      \"name\": \"read_ops\",\n      \"description\": \"Read operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"sk_switches\",\n      \"description\": \"Number of sk switches per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"ssd_data_read\",\n      \"description\": \"Number of SSD Disk kilobytes (KB) read per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"ssd_data_written\",\n      \"description\": \"Number of SSD Disk kilobytes (KB) written per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"sys_read_data\",\n      \"description\": \"Network and FCP kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"sys_total_data\",\n      \"description\": \"Network and FCP kilobytes (KB) received and sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"sys_write_data\",\n      \"description\": \"Network and FCP kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"tape_data_read\",\n      \"description\": \"Tape bytes read per millisecond\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"tape_data_written\",\n      \"description\": \"Tape bytes written per millisecond\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"time\",\n      \"description\": \"Time in seconds since the Epoch (00:00:00 UTC January 1 1970)\",\n      \"type\": \"raw\",\n      \"unit\": \"sec\"\n    },\n    {\n      \"name\": \"time_per_interrupt\",\n      \"description\": \"Processor time per interrupt\",\n      \"type\": \"average\",\n      \"unit\": \"microsec\",\n      \"denominator\": {\n        \"name\": \"interrupt_num\"\n      }\n    },\n    {\n      \"name\": \"time_per_interrupt_in_cp\",\n      \"description\": \"Processor time per interrupt in CP\",\n      \"type\": \"average\",\n      \"unit\": \"microsec\",\n      \"denominator\": {\n        \"name\": \"interrupt_num_in_cp\"\n      }\n    
},\n    {\n      \"name\": \"total_data\",\n      \"description\": \"Total throughput in bytes\",\n      \"type\": \"rate\",\n      \"unit\": \"b_per_sec\"\n    },\n    {\n      \"name\": \"total_latency\",\n      \"description\": \"Average latency for all operations in the system in microseconds\",\n      \"type\": \"average\",\n      \"unit\": \"microsec\",\n      \"denominator\": {\n        \"name\": \"total_ops\"\n      }\n    },\n    {\n      \"name\": \"total_ops\",\n      \"description\": \"Total number of operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"total_processor_busy\",\n      \"description\": \"Total processor utilization of all processors in the system\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"total_processor_busy_time\",\n      \"description\": \"Total processor time of all processors in the system\",\n      \"type\": \"delta\",\n      \"unit\": \"microsec\"\n    },\n    {\n      \"name\": \"uptime\",\n      \"description\": \"Time in seconds that the system has been up\",\n      \"type\": \"raw\",\n      \"unit\": \"sec\"\n    },\n    {\n      \"name\": \"wafliron\",\n      \"description\": \"Wafliron counters\",\n      \"type\": \"delta\",\n      \"unit\": \"none\"\n    },\n    {\n      \"name\": \"write_data\",\n      \"description\": \"Write throughput\",\n      \"type\": \"rate\",\n      \"unit\": \"b_per_sec\"\n    },\n    {\n      \"name\": \"write_latency\",\n      \"description\": \"Average latency for all write operations in the system in microseconds\",\n      \"type\": \"average\",\n      \"unit\": \"microsec\",\n      \"denominator\": {\n        \"name\": \"write_ops\"\n      }\n    },\n    {\n      \"name\": \"write_ops\",\n      \"description\": \"Write operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    }\n  ],\n  \"_links\": {\n    \"self\": {\n      \"href\": \"/api/cluster/counter/tables/system:node\"\n    }\n  }\n}\n

      "},{"location":"resources/rest-perf-metrics/#node-performance-metrics-with-all-instances-properties-and-counters","title":"Node performance metrics with all instances, properties, and counters","text":"

      Ask ONTAP to return all instances of system:node. For each system:node instance, include all of that node's properties and performance counters.

      curl 'https://$clusterIP/api/cluster/counter/tables/system:node/rows?fields=*&return_records=true'\n
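      The same request can be issued programmatically. Below is a minimal sketch using Python's requests library; the cluster address, the basic-auth credentials, and the choice to skip TLS verification (verify=False, typical for a self-signed cluster certificate) are assumptions for illustration and are not part of the curl example above.

# Minimal sketch: fetch every system:node row with all properties and counters.
# CLUSTER_IP, USER, and PASS are placeholders for your environment.
import requests

CLUSTER_IP = "10.0.0.1"
resp = requests.get(
    f"https://{CLUSTER_IP}/api/cluster/counter/tables/system:node/rows",
    params={"fields": "*", "return_records": "true"},
    auth=("USER", "PASS"),   # basic authentication (assumed)
    verify=False,            # assume a self-signed cluster certificate
)
resp.raise_for_status()

for record in resp.json().get("records", []):
    # Scalar counters carry "value"; array counters carry "values"/"labels".
    counters = {c["name"]: c.get("value") for c in record.get("counters", [])}
    print(record["id"], "nfs_ops:", counters.get("nfs_ops"))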
      Response

      {\n  \"records\": [\n    {\n      \"counter_table\": {\n        \"name\": \"system:node\"\n      },\n      \"id\": \"umeng-aff300-01:28e14eab-0580-11e8-bd9d-00a098d39e12\",\n      \"properties\": [\n        {\n          \"name\": \"node.name\",\n          \"value\": \"umeng-aff300-01\"\n        },\n        {\n          \"name\": \"system_model\",\n          \"value\": \"AFF-A300\"\n        },\n        {\n          \"name\": \"ontap_version\",\n          \"value\": \"NetApp Release R9.12.1xN_221108_1315: Tue Nov  8 15:32:25 EST 2022 \"\n        },\n        {\n          \"name\": \"compile_flags\",\n          \"value\": \"1\"\n        },\n        {\n          \"name\": \"serial_no\",\n          \"value\": \"721802000260\"\n        },\n        {\n          \"name\": \"system_id\",\n          \"value\": \"0537124012\"\n        },\n        {\n          \"name\": \"hostname\",\n          \"value\": \"umeng-aff300-01\"\n        },\n        {\n          \"name\": \"name\",\n          \"value\": \"umeng-aff300-01\"\n        },\n        {\n          \"name\": \"uuid\",\n          \"value\": \"28e14eab-0580-11e8-bd9d-00a098d39e12\"\n        }\n      ],\n      \"counters\": [\n        {\n          \"name\": \"memory\",\n          \"value\": 88766\n        },\n        {\n          \"name\": \"nfs_ops\",\n          \"value\": 15991465\n        },\n        {\n          \"name\": \"cifs_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"fcp_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"iscsi_ops\",\n          \"value\": 355884195\n        },\n        {\n          \"name\": \"nvme_fc_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_tcp_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_roce_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"network_data_received\",\n          \"value\": 33454266379\n        },\n        {\n          \"name\": \"network_data_sent\",\n          \"value\": 9938586739\n        },\n        {\n          \"name\": \"fcp_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"fcp_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"iscsi_data_received\",\n          \"value\": 4543696942\n        },\n        {\n          \"name\": \"iscsi_data_sent\",\n          \"value\": 3058795391\n        },\n        {\n          \"name\": \"nvme_fc_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_fc_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_tcp_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_tcp_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_roce_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_roce_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"partner_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"partner_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"sys_read_data\",\n          \"value\": 33454266379\n        },\n        {\n          \"name\": \"sys_write_data\",\n          \"value\": 9938586739\n        },\n        {\n          \"name\": \"sys_total_data\",\n          \"value\": 43392853118\n        },\n        {\n          \"name\": \"disk_data_read\",\n    
      \"value\": 32083838540\n        },\n        {\n          \"name\": \"disk_data_written\",\n          \"value\": 21102507352\n        },\n        {\n          \"name\": \"hdd_data_read\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"hdd_data_written\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"ssd_data_read\",\n          \"value\": 32083838540\n        },\n        {\n          \"name\": \"ssd_data_written\",\n          \"value\": 21102507352\n        },\n        {\n          \"name\": \"tape_data_read\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"tape_data_written\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"read_ops\",\n          \"value\": 33495530\n        },\n        {\n          \"name\": \"write_ops\",\n          \"value\": 324699398\n        },\n        {\n          \"name\": \"other_ops\",\n          \"value\": 13680732\n        },\n        {\n          \"name\": \"total_ops\",\n          \"value\": 371875660\n        },\n        {\n          \"name\": \"read_latency\",\n          \"value\": 14728140707\n        },\n        {\n          \"name\": \"write_latency\",\n          \"value\": 1568830328022\n        },\n        {\n          \"name\": \"other_latency\",\n          \"value\": 2132691612\n        },\n        {\n          \"name\": \"total_latency\",\n          \"value\": 1585691160341\n        },\n        {\n          \"name\": \"read_data\",\n          \"value\": 3212301497187\n        },\n        {\n          \"name\": \"write_data\",\n          \"value\": 4787509093524\n        },\n        {\n          \"name\": \"other_data\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"total_data\",\n          \"value\": 7999810590711\n        },\n        {\n          \"name\": \"cpu_busy\",\n          \"value\": 790347800332\n        },\n        {\n          \"name\": \"cpu_elapsed_time\",\n          \"value\": 3979034040025\n        },\n        {\n          \"name\": \"average_processor_busy_percent\",\n          \"value\": 788429907770\n        },\n        {\n          \"name\": \"total_processor_busy\",\n          \"value\": 12614878524320\n        },\n        {\n          \"name\": \"total_processor_busy_time\",\n          \"value\": 12614878524320\n        },\n        {\n          \"name\": \"num_processors\",\n          \"value\": 16\n        },\n        {\n          \"name\": \"interrupt_time\",\n          \"value\": 118435504138\n        },\n        {\n          \"name\": \"interrupt\",\n          \"value\": 118435504138\n        },\n        {\n          \"name\": \"interrupt_num\",\n          \"value\": 1446537540\n        },\n        {\n          \"name\": \"time_per_interrupt\",\n          \"value\": 118435504138\n        },\n        {\n          \"name\": \"non_interrupt_time\",\n          \"value\": 12496443020182\n        },\n        {\n          \"name\": \"non_interrupt\",\n          \"value\": 12496443020182\n        },\n        {\n          \"name\": \"idle_time\",\n          \"value\": 51049666116080\n        },\n        {\n          \"name\": \"idle\",\n          \"value\": 51049666116080\n        },\n        {\n          \"name\": \"cp_time\",\n          \"value\": 221447740301\n        },\n        {\n          \"name\": \"cp\",\n          \"value\": 221447740301\n        },\n        {\n          \"name\": \"interrupt_in_cp_time\",\n          \"value\": 7969316828\n        },\n        {\n          \"name\": 
\"interrupt_in_cp\",\n          \"value\": 7969316828\n        },\n        {\n          \"name\": \"interrupt_num_in_cp\",\n          \"value\": 1639345044\n        },\n        {\n          \"name\": \"time_per_interrupt_in_cp\",\n          \"value\": 7969316828\n        },\n        {\n          \"name\": \"sk_switches\",\n          \"value\": 3830419593\n        },\n        {\n          \"name\": \"hard_switches\",\n          \"value\": 2786999477\n        },\n        {\n          \"name\": \"intr_cnt_msec\",\n          \"value\": 3978648113\n        },\n        {\n          \"name\": \"intr_cnt_ipi\",\n          \"value\": 1709054\n        },\n        {\n          \"name\": \"intr_cnt_total\",\n          \"value\": 1215253490\n        },\n        {\n          \"name\": \"time\",\n          \"value\": 1677516216\n        },\n        {\n          \"name\": \"uptime\",\n          \"value\": 3978648\n        },\n        {\n          \"name\": \"processor_plevel_time\",\n          \"values\": [\n            3405835479577,\n            2628275207938,\n            1916273074545,\n            1366761457118,\n            964863281216,\n            676002919489,\n            472533086045,\n            331487674159,\n            234447654307,\n            167247803300,\n            120098535891,\n            86312126550,\n            61675398266,\n            43549889374,\n            30176461104,\n            19891286233,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"0_CPU\",\n            \"1_CPU\",\n            \"2_CPU\",\n            \"3_CPU\",\n            \"4_CPU\",\n            \"5_CPU\",\n            \"6_CPU\",\n            \"7_CPU\",\n            \"8_CPU\",\n            \"9_CPU\",\n            \"10_CPU\",\n            
\"11_CPU\",\n            \"12_CPU\",\n            \"13_CPU\",\n            \"14_CPU\",\n            \"15_CPU\",\n            \"16_CPU\",\n            \"17_CPU\",\n            \"18_CPU\",\n            \"19_CPU\",\n            \"20_CPU\",\n            \"21_CPU\",\n            \"22_CPU\",\n            \"23_CPU\",\n            \"24_CPU\",\n            \"25_CPU\",\n            \"26_CPU\",\n            \"27_CPU\",\n            \"28_CPU\",\n            \"29_CPU\",\n            \"30_CPU\",\n            \"31_CPU\",\n            \"32_CPU\",\n            \"33_CPU\",\n            \"34_CPU\",\n            \"35_CPU\",\n            \"36_CPU\",\n            \"37_CPU\",\n            \"38_CPU\",\n            \"39_CPU\",\n            \"40_CPU\",\n            \"41_CPU\",\n            \"42_CPU\",\n            \"43_CPU\",\n            \"44_CPU\",\n            \"45_CPU\",\n            \"46_CPU\",\n            \"47_CPU\",\n            \"48_CPU\",\n            \"49_CPU\",\n            \"50_CPU\",\n            \"51_CPU\",\n            \"52_CPU\",\n            \"53_CPU\",\n            \"54_CPU\",\n            \"55_CPU\",\n            \"56_CPU\",\n            \"57_CPU\",\n            \"58_CPU\",\n            \"59_CPU\",\n            \"60_CPU\",\n            \"61_CPU\",\n            \"62_CPU\",\n            \"63_CPU\",\n            \"64_CPU\",\n            \"65_CPU\",\n            \"66_CPU\",\n            \"67_CPU\",\n            \"68_CPU\",\n            \"69_CPU\",\n            \"70_CPU\",\n            \"71_CPU\",\n            \"72_CPU\",\n            \"73_CPU\",\n            \"74_CPU\",\n            \"75_CPU\",\n            \"76_CPU\",\n            \"77_CPU\",\n            \"78_CPU\",\n            \"79_CPU\",\n            \"80_CPU\",\n            \"81_CPU\",\n            \"82_CPU\",\n            \"83_CPU\",\n            \"84_CPU\",\n            \"85_CPU\",\n            \"86_CPU\",\n            \"87_CPU\",\n            \"88_CPU\",\n            \"89_CPU\",\n            \"90_CPU\",\n            \"91_CPU\",\n            \"92_CPU\",\n            \"93_CPU\",\n            \"94_CPU\",\n            \"95_CPU\",\n            \"96_CPU\",\n            \"97_CPU\",\n            \"98_CPU\",\n            \"99_CPU\",\n            \"100_CPU\",\n            \"101_CPU\",\n            \"102_CPU\",\n            \"103_CPU\",\n            \"104_CPU\",\n            \"105_CPU\",\n            \"106_CPU\",\n            \"107_CPU\",\n            \"108_CPU\",\n            \"109_CPU\",\n            \"110_CPU\",\n            \"111_CPU\",\n            \"112_CPU\",\n            \"113_CPU\",\n            \"114_CPU\",\n            \"115_CPU\",\n            \"116_CPU\",\n            \"117_CPU\",\n            \"118_CPU\",\n            \"119_CPU\",\n            \"120_CPU\",\n            \"121_CPU\",\n            \"122_CPU\",\n            \"123_CPU\",\n            \"124_CPU\",\n            \"125_CPU\",\n            \"126_CPU\",\n            \"127_CPU\"\n          ]\n        },\n        {\n          \"name\": \"processor_plevel\",\n          \"values\": [\n            3405835479577,\n            2628275207938,\n            1916273074545,\n            1366761457118,\n            964863281216,\n            676002919489,\n            472533086045,\n            331487674159,\n            234447654307,\n            167247803300,\n            120098535891,\n            86312126550,\n            61675398266,\n            43549889374,\n            30176461104,\n            19891286233,\n            0,\n            0,\n            0,\n            0,\n            
0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"0_CPU\",\n            \"1_CPU\",\n            \"2_CPU\",\n            \"3_CPU\",\n            \"4_CPU\",\n            \"5_CPU\",\n            \"6_CPU\",\n            \"7_CPU\",\n            \"8_CPU\",\n            \"9_CPU\",\n            \"10_CPU\",\n            \"11_CPU\",\n            \"12_CPU\",\n            \"13_CPU\",\n            \"14_CPU\",\n            \"15_CPU\",\n            \"16_CPU\",\n            \"17_CPU\",\n            \"18_CPU\",\n            \"19_CPU\",\n            \"20_CPU\",\n            \"21_CPU\",\n            \"22_CPU\",\n            \"23_CPU\",\n            \"24_CPU\",\n            \"25_CPU\",\n            \"26_CPU\",\n            \"27_CPU\",\n            \"28_CPU\",\n            \"29_CPU\",\n            \"30_CPU\",\n            \"31_CPU\",\n            \"32_CPU\",\n            \"33_CPU\",\n            \"34_CPU\",\n            \"35_CPU\",\n            \"36_CPU\",\n            \"37_CPU\",\n            \"38_CPU\",\n            \"39_CPU\",\n            \"40_CPU\",\n            \"41_CPU\",\n            \"42_CPU\",\n            \"43_CPU\",\n            \"44_CPU\",\n            \"45_CPU\",\n            \"46_CPU\",\n            \"47_CPU\",\n            \"48_CPU\",\n            \"49_CPU\",\n            \"50_CPU\",\n            \"51_CPU\",\n            \"52_CPU\",\n            \"53_CPU\",\n            \"54_CPU\",\n            \"55_CPU\",\n            \"56_CPU\",\n            \"57_CPU\",\n            \"58_CPU\",\n            \"59_CPU\",\n            \"60_CPU\",\n            \"61_CPU\",\n            \"62_CPU\",\n            \"63_CPU\",\n            \"64_CPU\",\n            \"65_CPU\",\n            \"66_CPU\",\n            \"67_CPU\",\n            \"68_CPU\",\n            \"69_CPU\",\n            \"70_CPU\",\n            \"71_CPU\",\n           
 \"72_CPU\",\n            \"73_CPU\",\n            \"74_CPU\",\n            \"75_CPU\",\n            \"76_CPU\",\n            \"77_CPU\",\n            \"78_CPU\",\n            \"79_CPU\",\n            \"80_CPU\",\n            \"81_CPU\",\n            \"82_CPU\",\n            \"83_CPU\",\n            \"84_CPU\",\n            \"85_CPU\",\n            \"86_CPU\",\n            \"87_CPU\",\n            \"88_CPU\",\n            \"89_CPU\",\n            \"90_CPU\",\n            \"91_CPU\",\n            \"92_CPU\",\n            \"93_CPU\",\n            \"94_CPU\",\n            \"95_CPU\",\n            \"96_CPU\",\n            \"97_CPU\",\n            \"98_CPU\",\n            \"99_CPU\",\n            \"100_CPU\",\n            \"101_CPU\",\n            \"102_CPU\",\n            \"103_CPU\",\n            \"104_CPU\",\n            \"105_CPU\",\n            \"106_CPU\",\n            \"107_CPU\",\n            \"108_CPU\",\n            \"109_CPU\",\n            \"110_CPU\",\n            \"111_CPU\",\n            \"112_CPU\",\n            \"113_CPU\",\n            \"114_CPU\",\n            \"115_CPU\",\n            \"116_CPU\",\n            \"117_CPU\",\n            \"118_CPU\",\n            \"119_CPU\",\n            \"120_CPU\",\n            \"121_CPU\",\n            \"122_CPU\",\n            \"123_CPU\",\n            \"124_CPU\",\n            \"125_CPU\",\n            \"126_CPU\",\n            \"127_CPU\"\n          ]\n        },\n        {\n          \"name\": \"domain_busy\",\n          \"values\": [\n            51049666116086,\n            13419960088,\n            13297686377,\n            1735383373870,\n            39183250298,\n            6728050897,\n            28229793795,\n            17493622207,\n            122290467,\n            974721172619,\n            47944793823,\n            164946850,\n            4162377932,\n            407009733276,\n            128199854099,\n            9037374471285,\n            38911301970,\n            366749865,\n            732045734,\n            2997541695,\n            14,\n            18,\n            40\n          ],\n          \"labels\": [\n            \"idle\",\n            \"kahuna\",\n            \"storage\",\n            \"exempt\",\n            \"none\",\n            \"raid\",\n            \"raid_exempt\",\n            \"xor_exempt\",\n            \"target\",\n            \"wafl_exempt\",\n            \"wafl_mpcleaner\",\n            \"sm_exempt\",\n            \"protocol\",\n            \"nwk_exempt\",\n            \"network\",\n            \"hostOS\",\n            \"ssan_exempt\",\n            \"unclassified\",\n            \"kahuna_legacy\",\n            \"ha\",\n            \"ssan_exempt2\",\n            \"exempt_ise\",\n            \"zombie\"\n          ]\n        },\n        {\n          \"name\": \"domain_shared\",\n          \"values\": [\n            0,\n            685164024474,\n            0,\n            0,\n            0,\n            24684879894,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"idle\",\n            \"kahuna\",\n            \"storage\",\n            \"exempt\",\n            \"none\",\n            \"raid\",\n            \"raid_exempt\",\n            \"xor_exempt\",\n            \"target\",\n            \"wafl_exempt\",\n            
\"wafl_mpcleaner\",\n            \"sm_exempt\",\n            \"protocol\",\n            \"nwk_exempt\",\n            \"network\",\n            \"hostOS\",\n            \"ssan_exempt\",\n            \"unclassified\",\n            \"kahuna_legacy\",\n            \"ha\",\n            \"ssan_exempt2\",\n            \"exempt_ise\",\n            \"zombie\"\n          ]\n        },\n        {\n          \"name\": \"dswitchto_cnt\",\n          \"values\": [\n            0,\n            322698663,\n            172936437,\n            446893016,\n            96971,\n            39788918,\n            5,\n            10,\n            10670440,\n            22,\n            7,\n            836,\n            2407967,\n            9798186907,\n            9802868991,\n            265242,\n            53,\n            2614118,\n            4430780,\n            66117706,\n            1,\n            1,\n            1\n          ],\n          \"labels\": [\n            \"idle\",\n            \"kahuna\",\n            \"storage\",\n            \"exempt\",\n            \"none\",\n            \"raid\",\n            \"raid_exempt\",\n            \"xor_exempt\",\n            \"target\",\n            \"wafl_exempt\",\n            \"wafl_mpcleaner\",\n            \"sm_exempt\",\n            \"protocol\",\n            \"nwk_exempt\",\n            \"network\",\n            \"hostOS\",\n            \"ssan_exempt\",\n            \"unclassified\",\n            \"kahuna_legacy\",\n            \"ha\",\n            \"ssan_exempt2\",\n            \"exempt_ise\",\n            \"zombie\"\n          ]\n        },\n        {\n          \"name\": \"intr_cnt\",\n          \"values\": [\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            
4191453008,\n            8181232,\n            1625052957,\n            0,\n            71854,\n            0,\n            71854,\n            0,\n            5,\n            0,\n            5,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"dev_0\",\n            \"dev_1\",\n            \"dev_2\",\n            \"dev_3\",\n            \"dev_4\",\n            \"dev_5\",\n            \"dev_6\",\n            \"dev_7\",\n            \"dev_8\",\n            \"dev_9\",\n            \"dev_10\",\n            \"dev_11\",\n            \"dev_12\",\n            \"dev_13\",\n            \"dev_14\",\n            \"dev_15\",\n            \"dev_16\",\n            \"dev_17\",\n            \"dev_18\",\n            \"dev_19\",\n            \"dev_20\",\n            \"dev_21\",\n            \"dev_22\",\n            \"dev_23\",\n            \"dev_24\",\n            \"dev_25\",\n            \"dev_26\",\n            \"dev_27\",\n            \"dev_28\",\n            \"dev_29\",\n            \"dev_30\",\n            \"dev_31\",\n            \"dev_32\",\n            \"dev_33\",\n            \"dev_34\",\n            \"dev_35\",\n            \"dev_36\",\n            \"dev_37\",\n            \"dev_38\",\n            \"dev_39\",\n            \"dev_40\",\n            \"dev_41\",\n            \"dev_42\",\n            \"dev_43\",\n            \"dev_44\",\n            \"dev_45\",\n            \"dev_46\",\n            \"dev_47\",\n            \"dev_48\",\n            \"dev_49\",\n            
\"dev_50\",\n            \"dev_51\",\n            \"dev_52\",\n            \"dev_53\",\n            \"dev_54\",\n            \"dev_55\",\n            \"dev_56\",\n            \"dev_57\",\n            \"dev_58\",\n            \"dev_59\",\n            \"dev_60\",\n            \"dev_61\",\n            \"dev_62\",\n            \"dev_63\",\n            \"dev_64\",\n            \"dev_65\",\n            \"dev_66\",\n            \"dev_67\",\n            \"dev_68\",\n            \"dev_69\",\n            \"dev_70\",\n            \"dev_71\",\n            \"dev_72\",\n            \"dev_73\",\n            \"dev_74\",\n            \"dev_75\",\n            \"dev_76\",\n            \"dev_77\",\n            \"dev_78\",\n            \"dev_79\",\n            \"dev_80\",\n            \"dev_81\",\n            \"dev_82\",\n            \"dev_83\",\n            \"dev_84\",\n            \"dev_85\",\n            \"dev_86\",\n            \"dev_87\",\n            \"dev_88\",\n            \"dev_89\",\n            \"dev_90\",\n            \"dev_91\",\n            \"dev_92\",\n            \"dev_93\",\n            \"dev_94\",\n            \"dev_95\",\n            \"dev_96\",\n            \"dev_97\",\n            \"dev_98\",\n            \"dev_99\",\n            \"dev_100\",\n            \"dev_101\",\n            \"dev_102\",\n            \"dev_103\",\n            \"dev_104\",\n            \"dev_105\",\n            \"dev_106\",\n            \"dev_107\",\n            \"dev_108\",\n            \"dev_109\",\n            \"dev_110\",\n            \"dev_111\",\n            \"dev_112\",\n            \"dev_113\",\n            \"dev_114\",\n            \"dev_115\",\n            \"dev_116\",\n            \"dev_117\",\n            \"dev_118\",\n            \"dev_119\",\n            \"dev_120\",\n            \"dev_121\",\n            \"dev_122\",\n            \"dev_123\",\n            \"dev_124\",\n            \"dev_125\",\n            \"dev_126\",\n            \"dev_127\",\n            \"dev_128\",\n            \"dev_129\",\n            \"dev_130\",\n            \"dev_131\",\n            \"dev_132\",\n            \"dev_133\",\n            \"dev_134\",\n            \"dev_135\",\n            \"dev_136\",\n            \"dev_137\",\n            \"dev_138\",\n            \"dev_139\",\n            \"dev_140\",\n            \"dev_141\",\n            \"dev_142\",\n            \"dev_143\",\n            \"dev_144\",\n            \"dev_145\",\n            \"dev_146\",\n            \"dev_147\",\n            \"dev_148\",\n            \"dev_149\",\n            \"dev_150\",\n            \"dev_151\",\n            \"dev_152\",\n            \"dev_153\",\n            \"dev_154\",\n            \"dev_155\",\n            \"dev_156\",\n            \"dev_157\",\n            \"dev_158\",\n            \"dev_159\",\n            \"dev_160\",\n            \"dev_161\",\n            \"dev_162\",\n            \"dev_163\",\n            \"dev_164\",\n            \"dev_165\",\n            \"dev_166\",\n            \"dev_167\",\n            \"dev_168\",\n            \"dev_169\",\n            \"dev_170\",\n            \"dev_171\",\n            \"dev_172\",\n            \"dev_173\",\n            \"dev_174\",\n            \"dev_175\",\n            \"dev_176\",\n            \"dev_177\",\n            \"dev_178\",\n            \"dev_179\",\n            \"dev_180\",\n            \"dev_181\",\n            \"dev_182\",\n            \"dev_183\",\n            \"dev_184\",\n            \"dev_185\",\n            \"dev_186\",\n            \"dev_187\",\n            \"dev_188\",\n   
         \"dev_189\",\n            \"dev_190\",\n            \"dev_191\",\n            \"dev_192\",\n            \"dev_193\",\n            \"dev_194\",\n            \"dev_195\",\n            \"dev_196\",\n            \"dev_197\",\n            \"dev_198\",\n            \"dev_199\",\n            \"dev_200\",\n            \"dev_201\",\n            \"dev_202\",\n            \"dev_203\",\n            \"dev_204\",\n            \"dev_205\",\n            \"dev_206\",\n            \"dev_207\",\n            \"dev_208\",\n            \"dev_209\",\n            \"dev_210\",\n            \"dev_211\",\n            \"dev_212\",\n            \"dev_213\",\n            \"dev_214\",\n            \"dev_215\",\n            \"dev_216\",\n            \"dev_217\",\n            \"dev_218\",\n            \"dev_219\",\n            \"dev_220\",\n            \"dev_221\",\n            \"dev_222\",\n            \"dev_223\",\n            \"dev_224\",\n            \"dev_225\",\n            \"dev_226\",\n            \"dev_227\",\n            \"dev_228\",\n            \"dev_229\",\n            \"dev_230\",\n            \"dev_231\",\n            \"dev_232\",\n            \"dev_233\",\n            \"dev_234\",\n            \"dev_235\",\n            \"dev_236\",\n            \"dev_237\",\n            \"dev_238\",\n            \"dev_239\",\n            \"dev_240\",\n            \"dev_241\",\n            \"dev_242\",\n            \"dev_243\",\n            \"dev_244\",\n            \"dev_245\",\n            \"dev_246\",\n            \"dev_247\",\n            \"dev_248\",\n            \"dev_249\",\n            \"dev_250\",\n            \"dev_251\",\n            \"dev_252\",\n            \"dev_253\",\n            \"dev_254\",\n            \"dev_255\"\n          ]\n        },\n        {\n          \"name\": \"wafliron\",\n          \"values\": [\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"iron_totstarts\",\n            \"iron_nobackup\",\n            \"iron_usebackup\"\n          ]\n        }\n      ],\n      \"aggregation\": {\n        \"count\": 2,\n        \"complete\": true\n      },\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/system:node/rows/umeng-aff300-01%3A28e14eab-0580-11e8-bd9d-00a098d39e12\"\n        }\n      }\n    },\n    {\n      \"counter_table\": {\n        \"name\": \"system:node\"\n      },\n      \"id\": \"umeng-aff300-02:1524afca-0580-11e8-ae74-00a098d390f2\",\n      \"properties\": [\n        {\n          \"name\": \"node.name\",\n          \"value\": \"umeng-aff300-02\"\n        },\n        {\n          \"name\": \"system_model\",\n          \"value\": \"AFF-A300\"\n        },\n        {\n          \"name\": \"ontap_version\",\n          \"value\": \"NetApp Release R9.12.1xN_221108_1315: Tue Nov  8 15:32:25 EST 2022 \"\n        },\n        {\n          \"name\": \"compile_flags\",\n          \"value\": \"1\"\n        },\n        {\n          \"name\": \"serial_no\",\n          \"value\": \"721802000259\"\n        },\n        {\n          \"name\": \"system_id\",\n          \"value\": \"0537123843\"\n        },\n        {\n          \"name\": \"hostname\",\n          \"value\": \"umeng-aff300-02\"\n        },\n        {\n          \"name\": \"name\",\n          \"value\": \"umeng-aff300-02\"\n        },\n        {\n          \"name\": \"uuid\",\n          \"value\": \"1524afca-0580-11e8-ae74-00a098d390f2\"\n        }\n      ],\n      \"counters\": [\n        {\n          \"name\": \"memory\",\n          
\"value\": 88766\n        },\n        {\n          \"name\": \"nfs_ops\",\n          \"value\": 2061227971\n        },\n        {\n          \"name\": \"cifs_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"fcp_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"iscsi_ops\",\n          \"value\": 183570559\n        },\n        {\n          \"name\": \"nvme_fc_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_tcp_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_roce_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"network_data_received\",\n          \"value\": 28707362447\n        },\n        {\n          \"name\": \"network_data_sent\",\n          \"value\": 31199786274\n        },\n        {\n          \"name\": \"fcp_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"fcp_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"iscsi_data_received\",\n          \"value\": 2462501728\n        },\n        {\n          \"name\": \"iscsi_data_sent\",\n          \"value\": 962425592\n        },\n        {\n          \"name\": \"nvme_fc_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_fc_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_tcp_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_tcp_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_roce_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_roce_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"partner_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"partner_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"sys_read_data\",\n          \"value\": 28707362447\n        },\n        {\n          \"name\": \"sys_write_data\",\n          \"value\": 31199786274\n        },\n        {\n          \"name\": \"sys_total_data\",\n          \"value\": 59907148721\n        },\n        {\n          \"name\": \"disk_data_read\",\n          \"value\": 27355740700\n        },\n        {\n          \"name\": \"disk_data_written\",\n          \"value\": 3426898232\n        },\n        {\n          \"name\": \"hdd_data_read\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"hdd_data_written\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"ssd_data_read\",\n          \"value\": 27355740700\n        },\n        {\n          \"name\": \"ssd_data_written\",\n          \"value\": 3426898232\n        },\n        {\n          \"name\": \"tape_data_read\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"tape_data_written\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"read_ops\",\n          \"value\": 29957410\n        },\n        {\n          \"name\": \"write_ops\",\n          \"value\": 2141657620\n        },\n        {\n          \"name\": \"other_ops\",\n          \"value\": 73183500\n        },\n        {\n          \"name\": \"total_ops\",\n          \"value\": 2244798530\n        },\n        {\n          \"name\": \"read_latency\",\n          \"value\": 43283636161\n        },\n        {\n          \"name\": \"write_latency\",\n          \"value\": 1437635703835\n 
       },\n        {\n          \"name\": \"other_latency\",\n          \"value\": 628457365\n        },\n        {\n          \"name\": \"total_latency\",\n          \"value\": 1481547797361\n        },\n        {\n          \"name\": \"read_data\",\n          \"value\": 1908711454978\n        },\n        {\n          \"name\": \"write_data\",\n          \"value\": 23562759645410\n        },\n        {\n          \"name\": \"other_data\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"total_data\",\n          \"value\": 25471471100388\n        },\n        {\n          \"name\": \"cpu_busy\",\n          \"value\": 511050841704\n        },\n        {\n          \"name\": \"cpu_elapsed_time\",\n          \"value\": 3979039364919\n        },\n        {\n          \"name\": \"average_processor_busy_percent\",\n          \"value\": 509151403977\n        },\n        {\n          \"name\": \"total_processor_busy\",\n          \"value\": 8146422463632\n        },\n        {\n          \"name\": \"total_processor_busy_time\",\n          \"value\": 8146422463632\n        },\n        {\n          \"name\": \"num_processors\",\n          \"value\": 16\n        },\n        {\n          \"name\": \"interrupt_time\",\n          \"value\": 108155323601\n        },\n        {\n          \"name\": \"interrupt\",\n          \"value\": 108155323601\n        },\n        {\n          \"name\": \"interrupt_num\",\n          \"value\": 3369179127\n        },\n        {\n          \"name\": \"time_per_interrupt\",\n          \"value\": 108155323601\n        },\n        {\n          \"name\": \"non_interrupt_time\",\n          \"value\": 8038267140031\n        },\n        {\n          \"name\": \"non_interrupt\",\n          \"value\": 8038267140031\n        },\n        {\n          \"name\": \"idle_time\",\n          \"value\": 55518207375072\n        },\n        {\n          \"name\": \"idle\",\n          \"value\": 55518207375072\n        },\n        {\n          \"name\": \"cp_time\",\n          \"value\": 64306316680\n        },\n        {\n          \"name\": \"cp\",\n          \"value\": 64306316680\n        },\n        {\n          \"name\": \"interrupt_in_cp_time\",\n          \"value\": 2024956616\n        },\n        {\n          \"name\": \"interrupt_in_cp\",\n          \"value\": 2024956616\n        },\n        {\n          \"name\": \"interrupt_num_in_cp\",\n          \"value\": 2661183541\n        },\n        {\n          \"name\": \"time_per_interrupt_in_cp\",\n          \"value\": 2024956616\n        },\n        {\n          \"name\": \"sk_switches\",\n          \"value\": 2798598514\n        },\n        {\n          \"name\": \"hard_switches\",\n          \"value\": 1354185066\n        },\n        {\n          \"name\": \"intr_cnt_msec\",\n          \"value\": 3978642246\n        },\n        {\n          \"name\": \"intr_cnt_ipi\",\n          \"value\": 797281\n        },\n        {\n          \"name\": \"intr_cnt_total\",\n          \"value\": 905575861\n        },\n        {\n          \"name\": \"time\",\n          \"value\": 1677516216\n        },\n        {\n          \"name\": \"uptime\",\n          \"value\": 3978643\n        },\n        {\n          \"name\": \"processor_plevel_time\",\n          \"values\": [\n            2878770221447,\n            1882901052733,\n            1209134416474,\n            771086627192,\n            486829133301,\n            306387520688,\n            193706139760,\n            123419519944,\n            79080346535,\n            
50459518003,\n            31714732122,\n            19476561954,\n            11616026278,\n            6666253598,\n            3623880168,\n            1790458071,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"0_CPU\",\n            \"1_CPU\",\n            \"2_CPU\",\n            \"3_CPU\",\n            \"4_CPU\",\n            \"5_CPU\",\n            \"6_CPU\",\n            \"7_CPU\",\n            \"8_CPU\",\n            \"9_CPU\",\n            \"10_CPU\",\n            \"11_CPU\",\n            \"12_CPU\",\n            \"13_CPU\",\n            \"14_CPU\",\n            \"15_CPU\",\n            \"16_CPU\",\n            \"17_CPU\",\n            \"18_CPU\",\n            \"19_CPU\",\n            \"20_CPU\",\n            \"21_CPU\",\n            \"22_CPU\",\n            \"23_CPU\",\n            \"24_CPU\",\n            \"25_CPU\",\n            \"26_CPU\",\n            \"27_CPU\",\n            \"28_CPU\",\n            \"29_CPU\",\n            \"30_CPU\",\n            \"31_CPU\",\n            \"32_CPU\",\n            \"33_CPU\",\n            \"34_CPU\",\n            \"35_CPU\",\n            \"36_CPU\",\n            \"37_CPU\",\n            \"38_CPU\",\n            \"39_CPU\",\n            \"40_CPU\",\n            \"41_CPU\",\n            \"42_CPU\",\n            \"43_CPU\",\n            \"44_CPU\",\n            \"45_CPU\",\n            \"46_CPU\",\n            \"47_CPU\",\n            \"48_CPU\",\n            \"49_CPU\",\n            \"50_CPU\",\n            \"51_CPU\",\n            \"52_CPU\",\n            \"53_CPU\",\n            \"54_CPU\",\n            \"55_CPU\",\n            \"56_CPU\",\n            \"57_CPU\",\n            \"58_CPU\",\n            \"59_CPU\",\n            \"60_CPU\",\n            \"61_CPU\",\n            
\"62_CPU\",\n            \"63_CPU\",\n            \"64_CPU\",\n            \"65_CPU\",\n            \"66_CPU\",\n            \"67_CPU\",\n            \"68_CPU\",\n            \"69_CPU\",\n            \"70_CPU\",\n            \"71_CPU\",\n            \"72_CPU\",\n            \"73_CPU\",\n            \"74_CPU\",\n            \"75_CPU\",\n            \"76_CPU\",\n            \"77_CPU\",\n            \"78_CPU\",\n            \"79_CPU\",\n            \"80_CPU\",\n            \"81_CPU\",\n            \"82_CPU\",\n            \"83_CPU\",\n            \"84_CPU\",\n            \"85_CPU\",\n            \"86_CPU\",\n            \"87_CPU\",\n            \"88_CPU\",\n            \"89_CPU\",\n            \"90_CPU\",\n            \"91_CPU\",\n            \"92_CPU\",\n            \"93_CPU\",\n            \"94_CPU\",\n            \"95_CPU\",\n            \"96_CPU\",\n            \"97_CPU\",\n            \"98_CPU\",\n            \"99_CPU\",\n            \"100_CPU\",\n            \"101_CPU\",\n            \"102_CPU\",\n            \"103_CPU\",\n            \"104_CPU\",\n            \"105_CPU\",\n            \"106_CPU\",\n            \"107_CPU\",\n            \"108_CPU\",\n            \"109_CPU\",\n            \"110_CPU\",\n            \"111_CPU\",\n            \"112_CPU\",\n            \"113_CPU\",\n            \"114_CPU\",\n            \"115_CPU\",\n            \"116_CPU\",\n            \"117_CPU\",\n            \"118_CPU\",\n            \"119_CPU\",\n            \"120_CPU\",\n            \"121_CPU\",\n            \"122_CPU\",\n            \"123_CPU\",\n            \"124_CPU\",\n            \"125_CPU\",\n            \"126_CPU\",\n            \"127_CPU\"\n          ]\n        },\n        {\n          \"name\": \"processor_plevel\",\n          \"values\": [\n            2878770221447,\n            1882901052733,\n            1209134416474,\n            771086627192,\n            486829133301,\n            306387520688,\n            193706139760,\n            123419519944,\n            79080346535,\n            50459518003,\n            31714732122,\n            19476561954,\n            11616026278,\n            6666253598,\n            3623880168,\n            1790458071,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n  
          0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"0_CPU\",\n            \"1_CPU\",\n            \"2_CPU\",\n            \"3_CPU\",\n            \"4_CPU\",\n            \"5_CPU\",\n            \"6_CPU\",\n            \"7_CPU\",\n            \"8_CPU\",\n            \"9_CPU\",\n            \"10_CPU\",\n            \"11_CPU\",\n            \"12_CPU\",\n            \"13_CPU\",\n            \"14_CPU\",\n            \"15_CPU\",\n            \"16_CPU\",\n            \"17_CPU\",\n            \"18_CPU\",\n            \"19_CPU\",\n            \"20_CPU\",\n            \"21_CPU\",\n            \"22_CPU\",\n            \"23_CPU\",\n            \"24_CPU\",\n            \"25_CPU\",\n            \"26_CPU\",\n            \"27_CPU\",\n            \"28_CPU\",\n            \"29_CPU\",\n            \"30_CPU\",\n            \"31_CPU\",\n            \"32_CPU\",\n            \"33_CPU\",\n            \"34_CPU\",\n            \"35_CPU\",\n            \"36_CPU\",\n            \"37_CPU\",\n            \"38_CPU\",\n            \"39_CPU\",\n            \"40_CPU\",\n            \"41_CPU\",\n            \"42_CPU\",\n            \"43_CPU\",\n            \"44_CPU\",\n            \"45_CPU\",\n            \"46_CPU\",\n            \"47_CPU\",\n            \"48_CPU\",\n            \"49_CPU\",\n            \"50_CPU\",\n            \"51_CPU\",\n            \"52_CPU\",\n            \"53_CPU\",\n            \"54_CPU\",\n            \"55_CPU\",\n            \"56_CPU\",\n            \"57_CPU\",\n            \"58_CPU\",\n            \"59_CPU\",\n            \"60_CPU\",\n            \"61_CPU\",\n            \"62_CPU\",\n            \"63_CPU\",\n            \"64_CPU\",\n            \"65_CPU\",\n            \"66_CPU\",\n            \"67_CPU\",\n            \"68_CPU\",\n            \"69_CPU\",\n            \"70_CPU\",\n            \"71_CPU\",\n            \"72_CPU\",\n            \"73_CPU\",\n            \"74_CPU\",\n            \"75_CPU\",\n            \"76_CPU\",\n            \"77_CPU\",\n            \"78_CPU\",\n            \"79_CPU\",\n            \"80_CPU\",\n            \"81_CPU\",\n            \"82_CPU\",\n            \"83_CPU\",\n            \"84_CPU\",\n            \"85_CPU\",\n            \"86_CPU\",\n            \"87_CPU\",\n            \"88_CPU\",\n            \"89_CPU\",\n            \"90_CPU\",\n            \"91_CPU\",\n            \"92_CPU\",\n            \"93_CPU\",\n            \"94_CPU\",\n            \"95_CPU\",\n            \"96_CPU\",\n            \"97_CPU\",\n            \"98_CPU\",\n            \"99_CPU\",\n            \"100_CPU\",\n            \"101_CPU\",\n            \"102_CPU\",\n            \"103_CPU\",\n            \"104_CPU\",\n            \"105_CPU\",\n            \"106_CPU\",\n            \"107_CPU\",\n            \"108_CPU\",\n            \"109_CPU\",\n            \"110_CPU\",\n            \"111_CPU\",\n            \"112_CPU\",\n            \"113_CPU\",\n            \"114_CPU\",\n            \"115_CPU\",\n            \"116_CPU\",\n            \"117_CPU\",\n            \"118_CPU\",\n            \"119_CPU\",\n            \"120_CPU\",\n            \"121_CPU\",\n            
\"122_CPU\",\n            \"123_CPU\",\n            \"124_CPU\",\n            \"125_CPU\",\n            \"126_CPU\",\n            \"127_CPU\"\n          ]\n        },\n        {\n          \"name\": \"domain_busy\",\n          \"values\": [\n            55518207375080,\n            8102895398,\n            12058227646,\n            991838747162,\n            28174147737,\n            6669066926,\n            14245801778,\n            9009875224,\n            118982762,\n            177496844302,\n            5888814259,\n            167280195,\n            3851617905,\n            484154906167,\n            91240285306,\n            6180138216837,\n            22111798640,\n            344700584,\n            266304074,\n            2388625825,\n            16,\n            21,\n            19\n          ],\n          \"labels\": [\n            \"idle\",\n            \"kahuna\",\n            \"storage\",\n            \"exempt\",\n            \"none\",\n            \"raid\",\n            \"raid_exempt\",\n            \"xor_exempt\",\n            \"target\",\n            \"wafl_exempt\",\n            \"wafl_mpcleaner\",\n            \"sm_exempt\",\n            \"protocol\",\n            \"nwk_exempt\",\n            \"network\",\n            \"hostOS\",\n            \"ssan_exempt\",\n            \"unclassified\",\n            \"kahuna_legacy\",\n            \"ha\",\n            \"ssan_exempt2\",\n            \"exempt_ise\",\n            \"zombie\"\n          ]\n        },\n        {\n          \"name\": \"domain_shared\",\n          \"values\": [\n            0,\n            153663450171,\n            0,\n            0,\n            0,\n            11834112384,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"idle\",\n            \"kahuna\",\n            \"storage\",\n            \"exempt\",\n            \"none\",\n            \"raid\",\n            \"raid_exempt\",\n            \"xor_exempt\",\n            \"target\",\n            \"wafl_exempt\",\n            \"wafl_mpcleaner\",\n            \"sm_exempt\",\n            \"protocol\",\n            \"nwk_exempt\",\n            \"network\",\n            \"hostOS\",\n            \"ssan_exempt\",\n            \"unclassified\",\n            \"kahuna_legacy\",\n            \"ha\",\n            \"ssan_exempt2\",\n            \"exempt_ise\",\n            \"zombie\"\n          ]\n        },\n        {\n          \"name\": \"dswitchto_cnt\",\n          \"values\": [\n            0,\n            178192633,\n            143964155,\n            286324250,\n            2365,\n            39684121,\n            5,\n            10,\n            10715325,\n            22,\n            7,\n            30,\n            2407970,\n            7865489299,\n            7870331008,\n            265242,\n            53,\n            2535145,\n            3252888,\n            53334340,\n            1,\n            1,\n            1\n          ],\n          \"labels\": [\n            \"idle\",\n            \"kahuna\",\n            \"storage\",\n            \"exempt\",\n            \"none\",\n            \"raid\",\n            \"raid_exempt\",\n            \"xor_exempt\",\n            \"target\",\n            \"wafl_exempt\",\n            \"wafl_mpcleaner\",\n            \"sm_exempt\",\n            
\"protocol\",\n            \"nwk_exempt\",\n            \"network\",\n            \"hostOS\",\n            \"ssan_exempt\",\n            \"unclassified\",\n            \"kahuna_legacy\",\n            \"ha\",\n            \"ssan_exempt2\",\n            \"exempt_ise\",\n            \"zombie\"\n          ]\n        },\n        {\n          \"name\": \"intr_cnt\",\n          \"values\": [\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            724698481,\n            8181275,\n            488080162,\n            0,\n            71856,\n            0,\n            71856,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            
0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"dev_0\",\n            \"dev_1\",\n            \"dev_2\",\n            \"dev_3\",\n            \"dev_4\",\n            \"dev_5\",\n            \"dev_6\",\n            \"dev_7\",\n            \"dev_8\",\n            \"dev_9\",\n            \"dev_10\",\n            \"dev_11\",\n            \"dev_12\",\n            \"dev_13\",\n            \"dev_14\",\n            \"dev_15\",\n            \"dev_16\",\n            \"dev_17\",\n            \"dev_18\",\n            \"dev_19\",\n            \"dev_20\",\n            \"dev_21\",\n            \"dev_22\",\n            \"dev_23\",\n            \"dev_24\",\n            \"dev_25\",\n            \"dev_26\",\n            \"dev_27\",\n            \"dev_28\",\n            \"dev_29\",\n            \"dev_30\",\n            \"dev_31\",\n            \"dev_32\",\n            \"dev_33\",\n            \"dev_34\",\n            \"dev_35\",\n            \"dev_36\",\n            \"dev_37\",\n            \"dev_38\",\n            \"dev_39\",\n            \"dev_40\",\n            \"dev_41\",\n            \"dev_42\",\n            \"dev_43\",\n            \"dev_44\",\n            \"dev_45\",\n            \"dev_46\",\n            \"dev_47\",\n            \"dev_48\",\n            \"dev_49\",\n            \"dev_50\",\n            \"dev_51\",\n            \"dev_52\",\n            \"dev_53\",\n            \"dev_54\",\n            \"dev_55\",\n            \"dev_56\",\n            \"dev_57\",\n            \"dev_58\",\n            \"dev_59\",\n            \"dev_60\",\n            \"dev_61\",\n            \"dev_62\",\n            \"dev_63\",\n            \"dev_64\",\n            \"dev_65\",\n            \"dev_66\",\n            \"dev_67\",\n            \"dev_68\",\n            \"dev_69\",\n            \"dev_70\",\n            \"dev_71\",\n            \"dev_72\",\n            \"dev_73\",\n            \"dev_74\",\n            \"dev_75\",\n            \"dev_76\",\n            \"dev_77\",\n            \"dev_78\",\n            \"dev_79\",\n            \"dev_80\",\n            \"dev_81\",\n            \"dev_82\",\n            \"dev_83\",\n            \"dev_84\",\n            \"dev_85\",\n            \"dev_86\",\n            \"dev_87\",\n            \"dev_88\",\n            \"dev_89\",\n            \"dev_90\",\n            \"dev_91\",\n            \"dev_92\",\n            \"dev_93\",\n            \"dev_94\",\n            \"dev_95\",\n            \"dev_96\",\n            \"dev_97\",\n            \"dev_98\",\n            \"dev_99\",\n            \"dev_100\",\n            \"dev_101\",\n           
 \"dev_102\",\n            \"dev_103\",\n            \"dev_104\",\n            \"dev_105\",\n            \"dev_106\",\n            \"dev_107\",\n            \"dev_108\",\n            \"dev_109\",\n            \"dev_110\",\n            \"dev_111\",\n            \"dev_112\",\n            \"dev_113\",\n            \"dev_114\",\n            \"dev_115\",\n            \"dev_116\",\n            \"dev_117\",\n            \"dev_118\",\n            \"dev_119\",\n            \"dev_120\",\n            \"dev_121\",\n            \"dev_122\",\n            \"dev_123\",\n            \"dev_124\",\n            \"dev_125\",\n            \"dev_126\",\n            \"dev_127\",\n            \"dev_128\",\n            \"dev_129\",\n            \"dev_130\",\n            \"dev_131\",\n            \"dev_132\",\n            \"dev_133\",\n            \"dev_134\",\n            \"dev_135\",\n            \"dev_136\",\n            \"dev_137\",\n            \"dev_138\",\n            \"dev_139\",\n            \"dev_140\",\n            \"dev_141\",\n            \"dev_142\",\n            \"dev_143\",\n            \"dev_144\",\n            \"dev_145\",\n            \"dev_146\",\n            \"dev_147\",\n            \"dev_148\",\n            \"dev_149\",\n            \"dev_150\",\n            \"dev_151\",\n            \"dev_152\",\n            \"dev_153\",\n            \"dev_154\",\n            \"dev_155\",\n            \"dev_156\",\n            \"dev_157\",\n            \"dev_158\",\n            \"dev_159\",\n            \"dev_160\",\n            \"dev_161\",\n            \"dev_162\",\n            \"dev_163\",\n            \"dev_164\",\n            \"dev_165\",\n            \"dev_166\",\n            \"dev_167\",\n            \"dev_168\",\n            \"dev_169\",\n            \"dev_170\",\n            \"dev_171\",\n            \"dev_172\",\n            \"dev_173\",\n            \"dev_174\",\n            \"dev_175\",\n            \"dev_176\",\n            \"dev_177\",\n            \"dev_178\",\n            \"dev_179\",\n            \"dev_180\",\n            \"dev_181\",\n            \"dev_182\",\n            \"dev_183\",\n            \"dev_184\",\n            \"dev_185\",\n            \"dev_186\",\n            \"dev_187\",\n            \"dev_188\",\n            \"dev_189\",\n            \"dev_190\",\n            \"dev_191\",\n            \"dev_192\",\n            \"dev_193\",\n            \"dev_194\",\n            \"dev_195\",\n            \"dev_196\",\n            \"dev_197\",\n            \"dev_198\",\n            \"dev_199\",\n            \"dev_200\",\n            \"dev_201\",\n            \"dev_202\",\n            \"dev_203\",\n            \"dev_204\",\n            \"dev_205\",\n            \"dev_206\",\n            \"dev_207\",\n            \"dev_208\",\n            \"dev_209\",\n            \"dev_210\",\n            \"dev_211\",\n            \"dev_212\",\n            \"dev_213\",\n            \"dev_214\",\n            \"dev_215\",\n            \"dev_216\",\n            \"dev_217\",\n            \"dev_218\",\n            \"dev_219\",\n            \"dev_220\",\n            \"dev_221\",\n            \"dev_222\",\n            \"dev_223\",\n            \"dev_224\",\n            \"dev_225\",\n            \"dev_226\",\n            \"dev_227\",\n            \"dev_228\",\n            \"dev_229\",\n            \"dev_230\",\n            \"dev_231\",\n            \"dev_232\",\n            \"dev_233\",\n            \"dev_234\",\n            \"dev_235\",\n            \"dev_236\",\n            \"dev_237\",\n            \"dev_238\",\n    
        \"dev_239\",\n            \"dev_240\",\n            \"dev_241\",\n            \"dev_242\",\n            \"dev_243\",\n            \"dev_244\",\n            \"dev_245\",\n            \"dev_246\",\n            \"dev_247\",\n            \"dev_248\",\n            \"dev_249\",\n            \"dev_250\",\n            \"dev_251\",\n            \"dev_252\",\n            \"dev_253\",\n            \"dev_254\",\n            \"dev_255\"\n          ]\n        },\n        {\n          \"name\": \"wafliron\",\n          \"values\": [\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"iron_totstarts\",\n            \"iron_nobackup\",\n            \"iron_usebackup\"\n          ]\n        }\n      ],\n      \"aggregation\": {\n        \"count\": 2,\n        \"complete\": true\n      },\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/system:node/rows/umeng-aff300-02%3A1524afca-0580-11e8-ae74-00a098d390f2\"\n        }\n      }\n    }\n  ],\n  \"num_records\": 2,\n  \"_links\": {\n    \"self\": {\n      \"href\": \"/api/cluster/counter/tables/system:node/rows?fields=*&return_records=true\"\n    }\n  }\n}\n

      "},{"location":"resources/rest-perf-metrics/#references","title":"References","text":"
      • Harvest REST Strategy
      • ONTAP 9.11.1 ONTAPI-to-REST Counter Manager Mapping
      • ONTAP REST API reference documentation
      • ONTAP REST API
      "},{"location":"resources/templates-and-metrics/","title":"Harvest Templates and Metrics","text":"

      Harvest collects ONTAP counter information, augments it, and stores it in a time-series DB. Refer to ONTAP Metrics for details about the ONTAP metrics exposed by Harvest.

      flowchart RL\n    Harvest[Harvest<br>Get & Augment] -- REST<br>ZAPI --> ONTAP\n    id1[(Prometheus<br>Store)] -- Scrape --> Harvest

      Three concepts work in unison to collect ONTAP metrics data, prepare it and make it available to Prometheus.

      • ZAPI/REST
      • Harvest templates
      • Exporters

      We're going to walk through an example from a running system, focusing on the disk object.

      At a high-level, Harvest templates describe what ZAPIs to send to ONTAP and how to interpret the responses.

      • ONTAP defines two ZAPIs to collect disk info
        • Config information is collected via storage-disk-get-iter
        • Performance counters are collected via disk:constituent
      • These ZAPIs are found in their corresponding object template file conf/zapi/cdot/9.8.0/disk.yaml and conf/zapiperf/cdot/9.8.0/disk.yaml. These files also describe how to map the ZAPI responses into a time-series-friendly format
      • Prometheus uniquely identifies a time series by its metric name and optional key-value pairs called labels.
      "},{"location":"resources/templates-and-metrics/#handy-tools","title":"Handy Tools","text":"
      • dasel is useful to convert between XML, YAML, JSON, etc. We'll use it to make displaying some of the data easier.
      "},{"location":"resources/templates-and-metrics/#ontap-zapi-disk-example","title":"ONTAP ZAPI disk example","text":"

      We'll use the bin/harvest zapi tool to interrogate the cluster and gather information about the counters. This is one way you can send ZAPIs to ONTAP and explore the return types and values.

      bin/harvest zapi -p u2 show attrs --api storage-disk-get-iter\n

      Output edited for brevity and line numbers added on left

      The hierarchy and return type of each counter is shown below. We'll use this hierarchy to build a matching Harvest template. For example, line 3 is the bytes-per-sector counter, which has an integer value, and is the child of storage-disk-info > disk-inventory-info.

      To capture that counter's value as a metric in Harvest, the ZAPI template must use the same hierarchical path. The matching path can be seen below.

      building tree for attribute [attributes-list] => [storage-disk-info]\n\n 1 [storage-disk-info]            -               *\n 2   [disk-inventory-info]        -                \n 3     [bytes-per-sector]         -         integer\n 4     [capacity-sectors]         -         integer\n 5     [disk-type]                -          string\n 6     [is-shared]                -         boolean\n 7     [model]                    -          string\n 8     [serial-number]            -          string\n 9     [shelf]                    -          string\n10     [shelf-bay]                -          string\n11   [disk-name]                  -          string\n12   [disk-ownership-info]        -                \n13     [home-node-name]           -          string\n14     [is-failed]                -         boolean\n15     [owner-node-name]          -          string\n16   [disk-raid-info]             -                \n17     [container-type]           -          string\n18     [disk-outage-info]         -                \n19       [is-in-fdr]              -         boolean\n20       [reason]                 -          string  \n21   [disk-stats-info]            -                \n22     [average-latency]          -         integer\n23     [disk-io-kbps]             -         integer\n24     [power-on-time-interval]   -         integer\n25     [sectors-read]             -         integer\n26     [sectors-written]          -         integer\n27   [disk-uid]                   -          string\n28   [node-name]                  -          string\n29   [storage-disk-state]         -         integer\n30   [storage-disk-state-flags]   -         integer\n
      "},{"location":"resources/templates-and-metrics/#harvest-templates","title":"Harvest Templates","text":"

      To understand templates, there are a few concepts to cover:

      There are three kinds of information included in templates that define what Harvest collects and exports:

      1. Configuration information is exported into the _labels metric (e.g. disk_labels; see below)
      2. Metric data is exported as disk_<metric name>, e.g. disk_bytes_per_sector, disk_sectors, etc. Metrics are leaf nodes that are not prefixed with a ^ or ^^. Metrics must be one of the number types: float or int.
      3. Plugins may add additional metrics, increasing the number of metrics exported in #2

      A resource will typically have multiple instances. Using disk as an example, that means there will be one disk_labels and a metric row per instance. If we have 24 disks and the disk template lists seven metrics to capture, Harvest will export a total of 192 rows of Prometheus data.

      24 instances * (7 metrics per instance + 1 label per instance) = 192 rows

      Sum of disk metrics that Harvest exports

      curl -s 'http://localhost:14002/metrics' | grep ^disk | cut -d'{' -f1 | sort | uniq -c\n  24 disk_bytes_per_sector\n  24 disk_labels\n  24 disk_sectors\n  24 disk_stats_average_latency\n  24 disk_stats_io_kbps\n  24 disk_stats_sectors_read\n  24 disk_stats_sectors_written\n  24 disk_uptime\n# 192 rows \n

      Read on to see how we control which labels from #1 and which metrics from #2 are included in the exported data.

      "},{"location":"resources/templates-and-metrics/#instance-keys-and-labels","title":"Instance Keys and Labels","text":"
      • Instance key - An instance key defines the set of attributes Harvest uses to construct a key that uniquely identifies an object. For example, the disk template uses the node + disk attributes to determine uniqueness. Using node or disk alone wouldn't be sufficient since disks on separate nodes can have the same name. If a single label does not uniquely identify an instance, combine multiple keys for uniqueness. Instance keys must refer to attributes that are of type string.

      Because instance keys define uniqueness, these keys are also added to each metric as a key-value pair. (See Control What Labels and Metrics are Exported for examples.)

      • Instance label - Labels are key-value pairs used to gather configuration information about each instance. All of the key-value pairs are combined into a single metric named disk_labels. There will be one disk_labels for each monitored instance. Here's an example reformatted so it's easier to read:
      disk_labels{\n  datacenter=\"dc-1\",\n  cluster=\"umeng-aff300-05-06\",\n  node=\"umeng-aff300-06\",\n  disk=\"1.1.23\",\n  type=\"SSD\",\n  model=\"X371_S1643960ATE\",\n  outage=\"\",\n  owner_node=\"umeng-aff300-06\",\n  shared=\"true\",\n  shelf=\"1\",\n  shelf_bay=\"23\",\n  serial_number=\"S3SENE0K500532\",\n  failed=\"false\",\n  container_type=\"shared\"\n}\n
      "},{"location":"resources/templates-and-metrics/#harvest-object-template","title":"Harvest Object Template","text":"

      Continuing with the disk example, below is the conf/zapi/cdot/9.8.0/disk.yaml that tells Harvest which ZAPI to send to ONTAP (storage-disk-get-iter) and describes how to interpret and export the response.

      • Line 1 defines the name of this resource and is an exact match to the object defined in your default.yaml or custom.yaml file. E.g.:
      # default.yaml\nobjects:\n  Disk:  disk.yaml\n
      • Line 2 is the name of the ZAPI that Harvest will send to collect disk resources
      • Line 3 is the prefix used to export metrics associated with this object. i.e. all metrics will be of the form disk_*
      • Line 5 the counter section is where we define the metrics, labels, and what constitutes instance uniqueness
      • Line 7 the double hat prefix ^^ means this attribute is an instance key used to determine uniqueness. Instance keys are also included as labels. UUIDs are good choices for uniqueness.
      • Line 13 the single hat prefix ^ means this attribute should be stored as a label. That means we can include it in the export_options section as one of the key-value pairs in disk_labels
      • Lines 10, 11, 23, 24, 25, 26, 27 - these are the metric rows. Metrics are leaf nodes that are not prefixed with a ^ or ^^. If you refer back to the ONTAP ZAPI disk example above, you'll notice that each of these attributes is an integer type.
      • Line 43 defines the set of labels to use when constructing the disk_labels metrics. As mentioned above, these labels capture config-related attributes per instance.

      Output edited for brevity and line numbers added for reference.

       1  name:             Disk\n 2  query:            storage-disk-get-iter\n 3  object:           disk\n 4  \n 5  counters:\n 6    storage-disk-info:\n 7      - ^^disk-uid\n 8      - ^^disk-name               => disk\n 9      - disk-inventory-info:\n10        - bytes-per-sector        => bytes_per_sector        # notice this has the same hierarchical path we saw from bin/harvest zapi\n11        - capacity-sectors        => sectors\n12        - ^disk-type              => type\n13        - ^is-shared              => shared\n14        - ^model                  => model\n15        - ^serial-number          => serial_number\n16        - ^shelf                  => shelf\n17        - ^shelf-bay              => shelf_bay\n18      - disk-ownership-info:\n19        - ^home-node-name         => node\n20        - ^owner-node-name        => owner_node\n21        - ^is-failed              => failed\n22      - disk-stats-info:\n23        - average-latency\n24        - disk-io-kbps\n25        - power-on-time-interval  => uptime\n26        - sectors-read\n27        - sectors-written\n28      - disk-raid-info:\n29        - ^container-type         => container_type\n30        - disk-outage-info:\n31          - ^reason               => outage\n32  \n33  plugins:\n34    - LabelAgent:\n35      # metric label zapi_value rest_value `default_value`\n36      value_to_num:\n37        - new_status outage - - `0` #ok_value is empty value, '-' would be converted to blank while processing.\n38  \n39  export_options:\n40    instance_keys:\n41      - node\n42      - disk\n43    instance_labels:\n44      - type\n45      - model\n46      - outage\n47      - owner_node\n48      - shared\n49      - shelf\n50      - shelf_bay\n51      - serial_number\n52      - failed\n53      - container_type\n
      "},{"location":"resources/templates-and-metrics/#control-what-labels-and-metrics-are-exported","title":"Control What Labels and Metrics are Exported","text":"

      Let's continue with disk and look at a few examples. We'll use curl to examine the Prometheus wire format that Harvest uses to export the metrics from conf/zapi/cdot/9.8.0/disk.yaml.

      The curl below shows all exported disk metrics. There are 24 disks on this cluster; Harvest is collecting seven metrics + one disk_labels + one plugin-created metric, disk_new_status, for a total of 216 rows.

      curl -s 'http://localhost:14002/metrics' | grep ^disk | cut -d'{' -f1 | sort | uniq -c\n  24 disk_bytes_per_sector           # metric\n  24 disk_labels                     # labels \n  24 disk_new_status                 # plugin created metric \n  24 disk_sectors                    # metric \n  24 disk_stats_average_latency      # metric   \n  24 disk_stats_io_kbps              # metric \n  24 disk_stats_sectors_read         # metric   \n  24 disk_stats_sectors_written      # metric  \n  24 disk_uptime                     # metric\n# sum = ((7 + 1 + 1) * 24 = 216 rows)\n

      Here's a disk_labels for one instance, reformatted to make it easier to read.

      curl -s 'http://localhost:14002/metrics' | grep ^disk_labels | head -1\n\ndisk_labels{\n  datacenter = \"dc-1\",                 # always included - value taken from datacenter in harvest.yml\n  cluster = \"umeng-aff300-05-06\",      # always included\n  node = \"umeng-aff300-06\",            # node is in the list of export_options instance_keys\n  disk = \"1.1.13\",                     # disk is in the list of export_options instance_keys\n  type = \"SSD\",                        # remainder are included because they are listed in the template's instance_labels\n  model = \"X371_S1643960ATE\",\n  outage = \"\",\n  owner_node = \"umeng-aff300-06\",\n  shared = \"true\",\n  shelf = \"1\",\n  shelf_bay = \"13\",\n  serial_number = \"S3SENE0K500572\",\n  failed = \"false\",\n  container_type = \"\",\n} 1.0\n

      Here's the disk_sectors metric for a single instance.

      curl -s 'http://localhost:14002/metrics' | grep ^disk_sectors | head -1\n\ndisk_sectors{                          # prefix of disk_ + metric name (line 11 in template)\n  datacenter = \"dc-1\",                 # always included - value taken from datacenter in harvest.yml\n  cluster = \"umeng-aff300-05-06\",      # always included\n  node = \"umeng-aff300-06\",            # node is in the list of export_options instance_keys\n  disk = \"1.1.17\",                     # disk is in the list of export_options instance_keys\n} 1875385008                           # metric value - number of sectors for this disk instance\n
      Number of rows for each template = number of instances * (number of metrics + 1 (for <name>_labels row) + plugin additions)\nNumber of metrics                = number of counters which are not labels or keys, those without a ^ or ^^\n
      "},{"location":"resources/templates-and-metrics/#common-errors-and-troubleshooting","title":"Common Errors and Troubleshooting","text":""},{"location":"resources/templates-and-metrics/#1-failed-to-parse-any-metrics","title":"1. Failed to parse any metrics","text":"

      You add a new template to Harvest, restart your poller, and get an error message:

      WRN ./poller.go:649 > init collector-object (Zapi:NetPort): no metrics => failed to parse any\n

      This means the collector, Zapi NetPort, was unable to find any metrics. Recall metrics are lines without prefixes. In cases where you don't have any metrics, but still want to collect labels, add the collect_only_labels: true key-value to your template. This flag tells Harvest to ignore that you don't have metrics and continue. Example.
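
      If you need a concrete picture, here is a minimal sketch of a label-only template; the NetPort query and counters are illustrative assumptions for this example, not copied from a shipped template:

      name:                 NetPort\nquery:                net-port-get-iter\nobject:               netport\n\ncollect_only_labels:  true\n\ncounters:\n  net-port-info:\n    - ^^node\n    - ^^port\n    - ^role\n\nexport_options:\n  instance_keys:\n    - node\n    - port\n  instance_labels:\n    - role\n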

      "},{"location":"resources/templates-and-metrics/#2-missing-data","title":"2. Missing Data","text":"
      1. What happens if an attribute is listed in the list of instance_labels (line 43 above), but that label is missing from the list of counters captured at line 5?

      The label will still be written into disk_labels, but the value will be empty since it's missing. E.g., if line 29 were deleted, container_type would still be present in disk_labels{container_type=\"\"}.

      "},{"location":"resources/templates-and-metrics/#prometheus-wire-format","title":"Prometheus Wire Format","text":"

      https://prometheus.io/docs/instrumenting/exposition_formats/

      Keep in mind that Prometheus does not permit dashes (-) in labels. That's why Harvest templates use name replacement to convert dashed-names to underscored-names with =>. e.g. bytes-per-sector => bytes_per_sector converts bytes-per-sector into the Prometheus accepted bytes_per_sector.

      Every time series is uniquely identified by its metric name and optional key-value pairs called labels.

      Labels enable Prometheus's dimensional data model: any combination of labels for the same metric name identifies a particular dimensional instantiation of that metric (for example: all HTTP requests that used the method POST to the /api/tracks handler). The query language allows filtering and aggregation based on these dimensions. Changing any label value, including adding or removing a label, will create a new time series.

      <metric_name>{<label_name>=<label_value>, ...} value [ timestamp ]

      • metric_name and label_name carry the usual Prometheus expression language restrictions
      • label_value can be any sequence of UTF-8 characters, but the backslash (\\), double-quote (\"), and line feed (\\n) characters have to be escaped as \\\\, \\\", and \\n, respectively.
      • value is a float represented as required by Go's ParseFloat() function. In addition to standard numerical values, NaN, +Inf, and -Inf are valid values representing not a number, positive infinity, and negative infinity, respectively.
      • timestamp is an int64 (milliseconds since epoch, i.e. 1970-01-01 00:00:00 UTC, excluding leap seconds), represented as required by Go's ParseInt() function
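
      Putting the format above together with the disk_sectors example from earlier on this page, a single exposition line with an illustrative, made-up timestamp would look like this:

      disk_sectors{datacenter=\"dc-1\",cluster=\"umeng-aff300-05-06\",node=\"umeng-aff300-06\",disk=\"1.1.17\"} 1875385008 1726531200000\n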

      Exposition formats

      "},{"location":"resources/zapi-and-rest-gap/","title":"ZAPI and REST Gaps","text":""},{"location":"resources/zapi-and-rest-gap/#volume-count-difference","title":"Volume Count difference","text":"

      The REST and ZAPI collectors return a different number of volume_labels depending on whether you have set up object store servers on your cluster.

      • The REST collector does not include volume_labels for volumes associated with object store servers.
      • The ZAPI collector includes volume_labels for volumes associated with object store servers. If you have not set up any object store servers on your cluster, both collectors will return the same number of volume_labels.
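
      One way to see the difference on your own system is to count the volume_labels rows exported by a poller running each collector; the port 14002 is the example port used earlier in these docs and will likely differ in your setup:

      curl -s 'http://localhost:14002/metrics' | grep -c '^volume_labels{'\n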
      "}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"What is Harvest?","text":"

      Harvest is the open-metrics endpoint for ONTAP and StorageGRID

      NetApp Harvest brings observability to ONTAP and StorageGRID clusters. Harvest collects performance, capacity and hardware metrics from ONTAP and StorageGRID, transforms them, and routes them to your choice of a time-series database.

      The included Grafana dashboards deliver the datacenter insights you need, while new metrics can be collected with a few edits of the included template files.

      Harvest is open-source, released under an Apache2 license, and offers great flexibility in how you collect, augment, and export your datacenter metrics.

      Out-of-the-box Harvest provides a set of pollers, collectors, templates, exporters, an optional auto-discover daemon, and a set of StorageGRID and ONTAP dashboards for Prometheus and Grafana. Harvest collects the metrics and makes them available to a separately installed instance of Prometheus/InfluxDB and Grafana.

      • Concepts
      • Quickstart Guide

      If you'd like to familiarize yourself with Harvest's core concepts, we recommend reading concepts.

      If you feel comfortable with the concepts, we recommend our quickstart guide, which takes you through a practical example.

      Note

      Hop onto our Discord or GitHub discussions and say hi. \ud83d\udc4b\ud83c\udffd

      "},{"location":"MigratePrometheusDocker/","title":"Migrate Prometheus Docker Volume","text":"

      If you want to keep your historical Prometheus data, and you generated your harvest-compose.yml file via bin/harvest generate before Harvest 22.11, please follow the steps below to migrate your historical Prometheus data.

      This is not required if you generated your harvest-compose.yml file via bin/harvest generate at Harvest release 22.11 or after.

      Outline of steps:
      1. Stop the Prometheus container so the data quiesces
      2. Find the historical Prometheus volume
      3. Create a new Prometheus volume that Harvest 22.11 and later will use
      4. Copy the historical Prometheus data from the old volume to the new one
      5. Optionally remove the historical Prometheus volume

      "},{"location":"MigratePrometheusDocker/#stop-prometheus-container","title":"Stop Prometheus container","text":"

      It's safe to run the stop and rm commands below regardless of whether Prometheus is running, since removing the container does not touch the historical data stored in the volume.

      Stop all containers named Prometheus and remove them.

      docker stop $(docker ps -fname=prometheus -q) && docker rm $(docker ps -a -fname=prometheus -q)\n

      Docker may complain if the container is not running, like so. You can ignore this.

      Ignorable output when container is not running (click me)
      \"docker stop\" requires at least 1 argument.\nSee 'docker stop --help'.\n\nUsage:  docker stop [OPTIONS] CONTAINER [CONTAINER...]\n\nStop one or more running containers\n
      "},{"location":"MigratePrometheusDocker/#find-the-name-of-the-prometheus-volume-that-has-the-historical-data","title":"Find the name of the Prometheus volume that has the historical data","text":"
      docker volume ls -f name=prometheus -q\n

      Output should look like this:

      harvest-22080-1_linux_amd64_prometheus_data  # historical Prometheus data here\nharvest_prometheus_data                      # it is fine if this line is missing\n

      We want to copy the historical data from harvest-22080-1_linux_amd64_prometheus_data to harvest_prometheus_data

      If harvest_prometheus_data already exists, you need to decide if you want to move that volume's data to a different volume or remove it. If you want to remove the volume, run docker volume rm harvest_prometheus_data. If you want to move the data, adjust the command below to first copy harvest_prometheus_data to a different volume and then remove it.
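
      For example, if you prefer to preserve the existing harvest_prometheus_data volume instead of deleting it, a sketch using the same alpine-based copy technique shown below would be (the backup volume name is illustrative):

      docker volume create --name harvest_prometheus_data_backup\ndocker run --rm -it -v harvest_prometheus_data:/from -v harvest_prometheus_data_backup:/to alpine ash -c \"cd /from ; cp -av . /to\"\ndocker volume rm harvest_prometheus_data\n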

      "},{"location":"MigratePrometheusDocker/#create-new-prometheus-volume","title":"Create new Prometheus volume","text":"

      We're going to create a new volume named harvest_prometheus_data by executing:

      docker volume create --name harvest_prometheus_data\n
      "},{"location":"MigratePrometheusDocker/#copy-the-historical-prometheus-data","title":"Copy the historical Prometheus data","text":"

      We will copy the historical Prometheus data from the old volume to the new one by mounting both volumes and copying data between them. NOTE: Prometheus only supports data from a single volume; it will not work if you attempt to copy multiple volumes into the same destination volume.

      # replace `HISTORICAL_VOLUME` with the name of the Prometheus volume that contains your historical data found in step 2.\ndocker run --rm -it -v $HISTORICAL_VOLUME:/from -v harvest_prometheus_data:/to alpine ash -c \"cd /from ; cp -av . /to\"\n

      Output will look something like this:

      './wal' -> '/to/./wal'\n'./wal/00000000' -> '/to/./wal/00000000'\n'./chunks_head' -> '/to/./chunks_head'\n...\n
      "},{"location":"MigratePrometheusDocker/#optionally-remove-historical-prometheus-data","title":"Optionally remove historical Prometheus data","text":"

      Before removing the historical data, start your compose stack and make sure everything works.
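
      For example, assuming you generated your compose files with bin/harvest generate and they are named prom-stack.yml and harvest-compose.yml (adjust the file names to your environment), you might start the stack like this:

      docker compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans\n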

      Once you're satisfied that you can destroy the old data, remove it like so.

      # replace `HISTORICAL_VOLUME` with the name of the Prometheus volume that contains your historical data found in step 2.\ndocker volume rm $HISTORICAL_VOLUME\n
      "},{"location":"MigratePrometheusDocker/#reference","title":"Reference","text":"
      • Rename Docker Volume
      "},{"location":"concepts/","title":"Concepts","text":"

      In order to understand how Harvest works, it's important to understand the following concepts:

      • Poller
      • Collectors
      • Templates
      • Exporters
      • Dashboards
      • Port Map

      In addition to the above concepts, Harvest uses the following software that you will want to be familiar with:

      • Prometheus
      • InfluxDB
      • Dashboards
      • Prometheus Auto-discover
      • Docker
      • NABox
      "},{"location":"concepts/#poller","title":"Poller","text":"

      The poller is the resident daemon process that coordinates the collectors and exporters. There will be one poller per monitored cluster.

      "},{"location":"concepts/#collectors","title":"Collectors","text":"

      Collectors implement the necessary protocol required to speak to the cluster. Harvest ships with ZAPI, REST, EMS, and StorageGRID collectors. Collectors use a set of per-object template files to determine which metrics to collect.

      More information:

      • Configuring Collectors
      "},{"location":"concepts/#templates","title":"Templates","text":"

      Templates define which metrics should be collected for an object (e.g. volume, lun, SVM, etc.). Harvest ships with a set of templates for each collector. The templates are written in YAML and are straightforward to read and modify. The templates are located in the conf directory.

      There are two kinds of templates:

      "},{"location":"concepts/#collector-templates","title":"Collector Templates","text":"

      Collector templates (e.g. conf/rest/default.yaml) define which set of objects Harvest should collect from the system being monitored when that collector runs. For example, the conf/rest/default.yaml collector template defines which objects should be collected by the REST collector, while conf/storagegrid/default.yaml lists which objects should be collected by the StorageGRID collector.
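
      As an abbreviated sketch, the objects mapping inside a collector template looks like the following; the shipped conf/rest/default.yaml lists many more objects, and the entries shown here are only illustrative:

      # conf/rest/default.yaml (abbreviated sketch)\ncollector: Rest\n\nobjects:\n  Disk:    disk.yaml\n  Lun:     lun.yaml\n  Volume:  volume.yaml\n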

      "},{"location":"concepts/#object-templates","title":"Object Templates","text":"

      Object templates (e.g. conf/rest/9.12.0/disk.yaml) define which metrics should be collected and exported for an object. For example, the disk.yaml object template defines which disk metrics should be collected (e.g. disk_bytes_per_sector, disk_stats_average_latency, disk_uptime, etc.)

      More information:

      • Templates
      • Templates and Metrics
      "},{"location":"concepts/#exporters","title":"Exporters","text":"

      Exporters are responsible for encoding the collected metrics and making them available to time-series databases. Harvest ships with Prometheus and InfluxDB exporters. Harvest does not include Prometheus and InfluxDB, only the exporters for them. Prometheus and InfluxDB must be installed separately via Docker, NAbox, or other means.

      "},{"location":"concepts/#prometheus","title":"Prometheus","text":"

      Prometheus is an open-source time-series database. It is a popular choice for storing and querying metrics.

      Don't call us, we'll call you

      None of the pollers know anything about Prometheus. That's because Prometheus pulls metrics from the poller's Prometheus exporter. The exporter creates an HTTP(s) endpoint that Prometheus scrapes on its own schedule.
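
      For a single poller, a minimal Prometheus scrape configuration might look like the following; the port 12990 is an assumed example and must match the port exposed by your poller's Prometheus exporter:

      # prometheus.yml (snippet)\nscrape_configs:\n  - job_name: 'harvest'\n    static_configs:\n      - targets: ['localhost:12990']\n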

      More information:

      • Prometheus Exporter
      "},{"location":"concepts/#influxdb","title":"InfluxDB","text":"

      InfluxDB is an open-source time-series database. Harvest ships with some sample Grafana dashboards that are designed to work with InfluxDB. Unlike the Prometheus exporter, Harvest's InfluxDB exporter pushes metrics from the poller to InfluxDB via InfluxDB's line protocol. The exporter is compatible with InfluxDB v2.0.
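
      As a rough sketch, an InfluxDB exporter entry in harvest.yml could look like the following; the parameter names (addr, bucket, org, token) are assumptions here, so check the InfluxDB Exporter documentation linked below for the authoritative list:

      Exporters:\n  influx1:\n    exporter: InfluxDB\n    addr: localhost          # assumed parameter name\n    bucket: harvest          # assumed parameter name\n    org: harvest             # assumed parameter name\n    token: ${INFLUX_TOKEN}   # assumed parameter name; value supplied via variable expansion\n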

      Note

      Harvest includes a subset of dashboards for InfluxDB. There is a richer set of dashboards available for Prometheus.

      More information:

      • InfluxDB Exporter
      "},{"location":"concepts/#dashboards","title":"Dashboards","text":"

      Harvest ships with a set of Grafana dashboards that are primarily designed to work with Prometheus. The dashboards are located in the grafana/dashboards directory. Harvest does not include Grafana, only the dashboards for it. Grafana must be installed separately via Docker, NAbox, or other means.

      Harvest includes CLI tools to import and export dashboards to Grafana. The CLI tools are available by running bin/harvest grafana --help

      More information:

      • Import or Export Dashboards
      • How to Create A New Dashboard
      "},{"location":"concepts/#prometheus-auto-discovery","title":"Prometheus Auto-Discovery","text":"

      Because of Prometheus's pull model, you need to configure Prometheus to tell it where to pull metrics from. There are two ways to tell Prometheus how to scrape Harvest: 1) listing each poller's address and port individually in Prometheus's config file or 2) using HTTP service discovery.

      Harvest's admin node implements Prometheus's HTTP service discovery API. Each poller registers its address and port with the admin node and Prometheus consults with the admin node for the list of targets it should scrape.
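
      A hedged sketch of the Prometheus side of this setup; the admin node's port 8887 comes from the port map below, while the /api/v1/sd path is an assumption to verify against the Prometheus HTTP Service Discovery documentation linked below:

      # prometheus.yml (snippet)\nscrape_configs:\n  - job_name: 'harvest-sd'\n    http_sd_configs:\n      - url: http://localhost:8887/api/v1/sd   # assumed path; see docs below\n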

      More information:

      • Configure Prometheus to scrape Harvest pollers
      • Prometheus Admin Node
      • Prometheus HTTP Service Discovery
      "},{"location":"concepts/#docker","title":"Docker","text":"

      Harvest runs natively in containers. The Harvest container includes the harvest and poller binaries as well as all templates and dashboards. If you want to stand up Harvest, Prometheus, and Grafana all together, you can use the Docker Compose workflow. The Docker Compose workflow is a good way to quickly get started with Harvest.

      More information:

      • Running Harvest in Docker
      • Running Harvest, Prometheus, and Grafana in Docker
      "},{"location":"concepts/#nabox","title":"NABox","text":"

      NABox is a separate virtual appliance (.ova) that acts as a front-end to Harvest and includes a Prometheus and Grafana setup for use with Harvest. NABox is a great option for customers that prefer a virtual appliance over containers.

      More information:

      • NABox
      "},{"location":"concepts/#port-map","title":"Port Map","text":"

      The default ports for ONTAP, Grafana, and Prometheus are shown below, along with three pollers. Poller1 is using the PrometheusExporter with a statically defined port in harvest.yml. Poller2 and Poller3 are using Harvest's admin node, port range, and Prometheus HTTP service discovery.

      graph LR\n  Poller1 -->|:443|ONTAP1;\n  Prometheus -->|:promPort1|Poller1;\n  Prometheus -->|:promPort2|Poller2;\n  Prometheus -->|:promPort3|Poller3;\n  Prometheus -->|:8887|AdminNode;\n\n  Poller2 -->|:443|ONTAP2;\n  AdminNode <-->|:8887|Poller3;\n  Poller3 -->|:443|ONTAP3;\n  AdminNode <-->|:8887|Poller2;\n\n  Grafana -->|:9090|Prometheus;\n  Browser -->|:3000|Grafana;
      • Grafana's default port is 3000 and is used to access the Grafana user-interface via a web browser
      • Prometheus's default port is 9090 and Grafana talks to the Prometheus datasource on that port
      • Prometheus scrapes each poller-exposed Prometheus port (promPort1, promPort2, promPort3)
      • Poller2 and Poller3 are configured to use a PrometheusExporter with a port range. Each poller picks a free port within the port_range and sends that port to the AdminNode.
      • The Prometheus config file, prometheus.yml, is updated with two scrape targets:

        1. the static address:port for Poller1
        2. the address:port for the AdminNode
      • Poller1 creates an HTTP endpoint on the static port defined in the harvest.yml file

      • All pollers use ZAPI or REST to communicate with ONTAP on port 443
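
      The layout in the diagram above could be expressed in harvest.yml roughly as follows; the addresses, ports, and the Admin/port_range keys are illustrative assumptions rather than a copied working configuration:

      Admin:\n  httpsd:\n    listen: :8887              # assumed admin node listen address\n\nExporters:\n  prom-static:\n    exporter: Prometheus\n    port: 12990                # Poller1's static port (example)\n  prom-range:\n    exporter: Prometheus\n    port_range: 13000-13099    # Poller2 and Poller3 pick a free port here\n\nPollers:\n  Poller1:\n    addr: 10.0.1.1\n    exporters:\n      - prom-static\n  Poller2:\n    addr: 10.0.1.2\n    exporters:\n      - prom-range\n  Poller3:\n    addr: 10.0.1.3\n    exporters:\n      - prom-range\n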
      "},{"location":"concepts/#reference","title":"Reference","text":"
      • Architecture.md
      "},{"location":"configure-ems/","title":"EMS","text":""},{"location":"configure-ems/#ems-collector","title":"EMS collector","text":"

      The EMS collector collects ONTAP event management system (EMS) events via the ONTAP REST API.

      The EMS alert runbook includes descriptions and remediation steps for the EMS events that Harvest collects.

      This collector uses a YAML template file to define which events to collect, export, and what labels to attach to each metric. This means you can collect new EMS events or attach new labels by editing the default template file or by extending existing templates. Events that occurred when the EMS collector was not running will not be captured.

      The default template file contains 98 EMS events.

      "},{"location":"configure-ems/#supported-ontap-systems","title":"Supported ONTAP Systems","text":"

      Any cDOT ONTAP system using 9.6 or higher.

      "},{"location":"configure-ems/#requirements","title":"Requirements","text":"

      It is recommended to create a read-only user on the ONTAP system. See prepare an ONTAP cDOT cluster for details.

      "},{"location":"configure-ems/#metrics","title":"Metrics","text":"

      This collector collects EMS events from ONTAP and for each received EMS event, creates new metrics prefixed with ems_events.

      Harvest supports two types of ONTAP EMS events:

      • Normal EMS events

      Single shot events. When ONTAP detects a problem, an event is raised. When the issue is addressed, ONTAP does not raise another event reflecting that the problem was resolved.

      • Bookend EMS events

      ONTAP creates bookend events in matching pairs. ONTAP creates an event when an issue is detected and another paired event when the event is resolved. Typically, these events share a common set of properties.

      "},{"location":"configure-ems/#collector-configuration","title":"Collector Configuration","text":"

      The parameters of the collector are distributed across three files:

      • Harvest configuration file (default: harvest.yml)
      • EMS collector configuration file (default: conf/ems/default.yaml)
      • EMS template file (located in conf/ems/9.6.0/ems.yaml)

      Except for addr, datacenter, and auth_style, all other parameters of the EMS collector can be defined in any of these three files. Parameters defined in lower-level files override parameters in higher-level files. This allows you to configure each EMS event individually or use the same parameters for all events.

      "},{"location":"configure-ems/#ems-collector-configuration-file","title":"EMS Collector Configuration File","text":"

      This configuration file contains the parameters that are used to configure the EMS collector. These parameters can be defined in your harvest.yml or conf/ems/default.yaml file.

      • client_timeout (Go duration, default: 1m): how long to wait for server responses
      • schedule (list, required): the polling frequency of the collector/object. Should include exactly the following two elements, in the order specified:
        • instance (Go duration): polling frequency for updating the instance cache (example value: 24h = 1440m)
        • data (Go duration): polling frequency for updating the data cache (example value: 3m)

      Note: Harvest allows defining poll intervals on sub-second level (e.g. 1ms), however keep in mind the following:
      • API response of an ONTAP system can take several seconds, so the collector is likely to enter failed state if the poll interval is less than client_timeout.
      • Small poll intervals will create significant workload on the ONTAP system.

      The EMS configuration file should contain the following section mapping the Ems object to the corresponding template file.

      objects:\n  Ems: ems.yaml\n

      Even though the EMS mapping shown above references a single file named ems.yaml, there may be multiple versions of that file across subdirectories named after ONTAP releases. See cDOT for examples.

      At runtime, the EMS collector will select the appropriate object configuration file that most closely matches the targeted ONTAP system.

      "},{"location":"configure-ems/#ems-template-file","title":"EMS Template File","text":"

      The EMS template file should contain the following parameters:

      • name (string, default: EMS): display name of the collector; this matches the name defined in your conf/ems/default.yaml file
      • object (string, default: ems): short name of the object, used to prefix metrics
      • query (string, default: api/support/ems/events): REST API endpoint used to query EMS events
      • exports (list): list of default labels attached to each exported metric
      • events (list): list of EMS events to collect. See Event Parameters
      "},{"location":"configure-ems/#event-parameters","title":"Event Parameters","text":"

      This section defines the list of EMS events you want to collect, which properties to export, what labels to attach, and how to handle bookend pairs. The EMS event template parameters are explained below along with an example for reference.

      • name is the ONTAP EMS event name. (collect ONTAP EMS events with the name of LUN.offline)
      • matches list of name-value pairs used to further filter ONTAP events. Some EMS events include arguments and these name-value pairs provide a way to filter on those arguments. (Only collect ONTAP EMS events where volume_name has the value abc_vol)
      • exports list of EMS event parameters to export. These exported parameters are attached as labels to each matching EMS event.
        • labels that are prefixed with ^^ use that parameter to define instance uniqueness.
      • resolve_when_ems (applicable to bookend events only). Lists the resolving event that pairs with the issuing event
        • name is the ONTAP EMS event name of the resolving EMS event (LUN.online). When the resolving event is received, the issuing EMS event will be resolved. In this example, Harvest will raise an event when it finds the ONTAP EMS event named LUN.offline and that event will be resolved when the EMS event named LUN.online is received.
        • resolve_after (optional, Go duration, default = 28 days) resolve the issuing EMS after the specified duration has elapsed (672h = 28d). If the bookend pair is not received within the resolve_after duration, the Issuing EMS event expires. When that happens, Harvest will mark the event as auto resolved by adding the autoresolved=true label to the issuing EMS event.
        • resolve_key (optional) bookend key used to match bookend EMS events. Defaults to prefixed (^^) labels in exports section. resolve_key allows you to override what is defined in the exports section.

      Labels are only exported if they are included in the exports section.

      Example template definition for the LUN.offline EMS event:

        - name: LUN.offline\n    matches:\n      - name: volume_name\n        value: abc_vol\n    exports:\n      - ^^parameters.object_uuid            => object_uuid\n      - parameters.object_type              => object_type\n      - parameters.lun_path                 => lun_path\n      - parameters.volume_name              => volume\n      - parameters.volume_dsid              => volume_ds_id\n    resolve_when_ems:\n      - name: LUN.online\n        resolve_after: 672h\n        resolve_key:\n          - ^^parameters.object_uuid        => object_uuid\n
      "},{"location":"configure-ems/#how-do-i-find-the-full-list-of-supported-ems-events","title":"How do I find the full list of supported EMS events?","text":"

      ONTAP documents the list of EMS events created in the ONTAP EMS Event Catalog.

      You can also query a live system and ask the cluster for its event catalog like so:

      curl --insecure --user \"user:password\" 'https://10.61.124.110/api/support/ems/messages?fields=*'\n

      Example Output

      {\n  \"records\": [\n    {\n      \"name\": \"AccessCache.NearLimits\",\n      \"severity\": \"alert\",\n      \"description\": \"This message occurs when the access cache module is near its limits for entries or export rules. Reaching these limits can prevent new clients from being able to mount and perform I/O on the storage system, and can also cause clients to be granted or denied access based on stale cached information.\",\n      \"corrective_action\": \"Ensure that the number of clients accessing the storage system continues to be below the limits for access cache entries and export rules across those entries. If the set of clients accessing the storage system is constantly changing, consider using the \\\"vserver export-policy access-cache config modify\\\" command to reduce the harvest timeout parameter so that cache entries for clients that are no longer accessing the storage system can be evicted sooner.\",\n      \"snmp_trap_type\": \"severity_based\",\n      \"deprecated\": false\n    },\n...\n    {\n      \"name\": \"ztl.smap.online.status\",\n      \"severity\": \"notice\",\n      \"description\": \"This message occurs when the specified partition on a Software Defined Flash drive could not be onlined due to internal S/W or device error.\",\n      \"corrective_action\": \"NONE\",\n      \"snmp_trap_type\": \"severity_based\",\n      \"deprecated\": false\n    }\n  ],\n  \"num_records\": 7273\n}\n
      "},{"location":"configure-ems/#ems-prometheus-alerts","title":"Ems Prometheus Alerts","text":"

      Refer to Prometheus-Alerts

      "},{"location":"configure-grafana/","title":"Configure Grafana","text":""},{"location":"configure-grafana/#grafana","title":"Grafana","text":"

      Grafana hosts the Harvest dashboards and needs to be set up before importing your dashboards.

      "},{"location":"configure-harvest-advanced/","title":"Configure Harvest (advanced)","text":"

      This chapter describes additional advanced configuration possibilities of NetApp Harvest. For a typical installation, this level of detail is likely not needed.

      "},{"location":"configure-harvest-advanced/#variable-expansion","title":"Variable Expansion","text":"

      The harvest.yml configuration file supports variable expansion. This allows you to use environment variables in the configuration file. Harvest will expand strings with the format $__env{VAR} or ${VAR}, replacing the variable VAR with the value of the environment variable. If the environment variable is not set, the variable will be replaced with an empty string.

      Here's an example snippet from harvest.yml:

      Pollers:\n  netapp_frankfurt:\n    addr: 10.0.1.2\n    username: $__env{NETAPP_FRANKFURT_RO_USER}\n  netapp_london:\n    addr: uk-cluster\n    username: ${NETAPP_LONDON_RO_USER}\n  netapp_rtp:\n    addr: 10.0.1.4\n    username: $__env{NETAPP_RTP_RO_USER}\n

      If you set the environment variable NETAPP_FRANKFURT_RO_USER to harvest1 and NETAPP_LONDON_RO_USER to harvest2, the configuration will be expanded to:

      Pollers:\n  netapp_frankfurt:\n    addr: 10.0.1.2\n    username: harvest1\n  netapp_london:\n    addr: uk-cluster\n    username: harvest2\n  netapp_rtp:\n    addr: 10.0.1.4\n    username: \n
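
      For instance, you might export the variables in your shell before starting the pollers; the poller names match the snippet above, and bin/harvest start is the standard way to launch them:

      export NETAPP_FRANKFURT_RO_USER=harvest1\nexport NETAPP_LONDON_RO_USER=harvest2\nbin/harvest start netapp_frankfurt netapp_london\n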
      "},{"location":"configure-harvest-basic/","title":"Configure Harvest (basic)","text":"

      The main configuration file, harvest.yml, consists of the following sections, described below:

      "},{"location":"configure-harvest-basic/#pollers","title":"Pollers","text":"

      All pollers are defined in harvest.yml, the main configuration file of Harvest, under the section Pollers.

      • Poller name (header) (required): poller name, user-defined value
      • datacenter (required): datacenter name, user-defined value
      • addr (required by some collectors): IPv4, IPv6 or FQDN of the target system
      • collectors (required): list of collectors to run for this poller
      • exporters (required): list of exporter names from the Exporters section. Note: this should be the name of the exporter (e.g. prometheus1), not the value of the exporter key (e.g. Prometheus)
      • auth_style (required by Zapi* collectors, default: basic_auth): either basic_auth or certificate_auth. See authentication for details
      • username, password (required if auth_style is basic_auth)
      • ssl_cert, ssl_key (optional if auth_style is certificate_auth): paths to SSL (client) certificate and key used to authenticate with the target system. If not provided, the poller will look for <hostname>.key and <hostname>.pem in $HARVEST_HOME/cert/. To create certificates for ONTAP systems, see using certificate authentication
      • ca_cert (optional if auth_style is certificate_auth): path to a file that contains PEM encoded certificates. Harvest will append these certificates to the system-wide set of root certificate authorities (CA). If not provided, the OS's root CAs will be used. To create certificates for ONTAP systems, see using certificate authentication
      • use_insecure_tls (optional, bool, default: false): if true, disable TLS verification when connecting to ONTAP cluster
      • credentials_file (optional, string): path to a yaml file that contains cluster credentials. The file should have the same shape as harvest.yml. See here for examples. Path can be relative to harvest.yml or absolute.
      • credentials_script (optional, section): section that defines how Harvest should fetch credentials via external script. See here for details.
      • tls_min_version (optional, string, default: platform decides): minimum TLS version to use when connecting to ONTAP cluster; one of tls10, tls11, tls12 or tls13
      • labels (optional, list of key-value pairs): each of the key-value pairs will be added to a poller's metrics. Details below
      • log_max_bytes (default: 10 MB): maximum size of the log file before it will be rotated
      • log_max_files (default: 5): number of rotated log files to keep
      • log (optional, list of collector names): matching collectors log their ZAPI request/response
      • prefer_zapi (optional, bool): use the ZAPI API if the cluster supports it, otherwise allow Harvest to choose REST or ZAPI, whichever is appropriate to the ONTAP version. See rest-strategy for details.
      • conf_path (optional, : separated list of directories, default: conf): the search path Harvest uses to load its templates. Harvest walks each directory in order, stopping at the first one that contains the desired template.
      "},{"location":"configure-harvest-basic/#defaults","title":"Defaults","text":"

      This section is optional. If there are parameters identical for all your pollers (e.g., datacenter, authentication method, login preferences), they can be grouped under this section. The poller section will be checked first, and if the values aren't found there, the defaults will be consulted.
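
      For example, a minimal sketch of a Defaults section (the poller names and addresses below are hypothetical) could look like this:

      Defaults:\n  datacenter: dc-01\n  auth_style: basic_auth\n  use_insecure_tls: false\n\nPollers:\n  cluster-01:\n    addr: 10.0.1.1\n  cluster-02:\n    addr: 10.0.1.2\n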

      "},{"location":"configure-harvest-basic/#exporters","title":"Exporters","text":"

      All exporters need two types of parameters:

      • exporter parameters - defined in harvest.yml under Exporters section
      • export_options - these options are defined in the Matrix data structure emitted from collectors and plugins

      The following two parameters are required for all exporters:

      parameter type description default Exporter name (header) required Name of the exporter instance, this is a user-defined value exporter required Name of the exporter class (e.g. Prometheus, InfluxDB, Http) - these can be found under the cmd/exporters/ directory

      Note: when we talk about the Prometheus Exporter or InfluxDB Exporter, we mean the Harvest modules that send the data to a database, NOT the names used to refer to the actual databases.
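
      As a minimal sketch, an Exporters section with a single Prometheus exporter instance could look like the following (the instance name prometheus1 and the port value are placeholders; exporter-specific parameters such as port are described in each exporter's own section):

      Exporters:\n  prometheus1:\n    exporter: Prometheus\n    port: 12990\n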

      "},{"location":"configure-harvest-basic/#prometheus-exporter","title":"Prometheus Exporter","text":""},{"location":"configure-harvest-basic/#influxdb-exporter","title":"InfluxDB Exporter","text":""},{"location":"configure-harvest-basic/#tools","title":"Tools","text":"

      This section is optional. You can uncomment the grafana_api_token key and add your Grafana API token so harvest does not prompt you for the key when importing dashboards.

      Tools:\n  #grafana_api_token: 'aaa-bbb-ccc-ddd'\n
      "},{"location":"configure-harvest-basic/#poller_files","title":"Poller_files","text":"

      Harvest supports loading pollers from multiple files specified in the Poller_files section of your harvest.yml file. For example, the following snippet tells harvest to load pollers from all the *.yml files under the configs directory, and from the path/to/single.yml file.

      Paths may be relative or absolute.

      Poller_files:\n    - configs/*.yml\n    - path/to/single.yml\n\nPollers:\n    u2:\n        datacenter: dc-1\n

      Each referenced file can contain one or more unique pollers. Ensure that you include the top-level Pollers section in these files. All other top-level sections will be ignored. For example:

      # contents of configs/00-rtp.yml\nPollers:\n  ntap3:\n    datacenter: rtp\n\n  ntap4:\n    datacenter: rtp\n---\n# contents of configs/01-rtp.yml\nPollers:\n  ntap5:\n    datacenter: blr\n---\n# contents of path/to/single.yml\nPollers:\n  ntap1:\n    datacenter: dc-1\n\n  ntap2:\n    datacenter: dc-1\n

      At runtime, all files will be read and combined into a single configuration. The example above would result in the following set of pollers in this order.

      - u2\n- ntap3\n- ntap4\n- ntap5\n- ntap1\n- ntap2\n

      When using glob patterns, the list of matching paths is sorted before the files are read. If duplicate pollers are found, an error is logged for each duplicate and Harvest will refuse to start.

      "},{"location":"configure-harvest-basic/#configuring-collectors","title":"Configuring collectors","text":"

      Collectors are configured by their own configuration files (templates), which are stored in subdirectories in conf/. Most collectors run concurrently and collect a subset of related metrics. For example, node-related metrics are grouped together and run independently of the disk-related metrics. Below is a snippet from conf/zapi/default.yaml.

      In this example, the default.yaml template contains a list of objects (e.g., Node) that reference sub-templates (e.g., node.yaml). This decomposition groups related metrics together and at runtime, a Zapi collector per object will be created and each of these collectors will run concurrently.

      Using the snippet below, we expect there to be four Zapi collectors running, each with a different subtemplate and object.

      collector:          Zapi\nobjects:\n  Node:             node.yaml\n  Aggregate:        aggr.yaml\n  Volume:           volume.yaml\n  SnapMirror:       snapmirror.yaml\n

      At start-up, Harvest looks for two files (default.yaml and custom.yaml) in the conf directory of the collector (e.g. conf/zapi/default.yaml). The default.yaml is installed by default, while the custom.yaml is an optional file you can create to add new templates.

      When present, the custom.yaml file will be merged with the default.yaml file. This behavior can be overridden in your harvest.yml, see here for an example.

      For a list of collector-specific parameters, refer to their individual documentation.

      "},{"location":"configure-harvest-basic/#zapi-and-zapiperf","title":"Zapi and ZapiPerf","text":""},{"location":"configure-harvest-basic/#rest-and-restperf","title":"Rest and RestPerf","text":""},{"location":"configure-harvest-basic/#ems","title":"EMS","text":""},{"location":"configure-harvest-basic/#storagegrid","title":"StorageGRID","text":""},{"location":"configure-harvest-basic/#unix","title":"Unix","text":""},{"location":"configure-harvest-basic/#labels","title":"Labels","text":"

      Labels offer a way to add additional key-value pairs to a poller's metrics. These allow you to tag a cluster's metrics in a cross-cutting fashion. Here's an example:

        cluster-03:\n    datacenter: DC-01\n    addr: 10.0.1.1\n    labels:\n      - org: meg       # add an org label with the value \"meg\"\n      - ns:  rtp       # add a namespace label with the value \"rtp\"\n

      These settings add two key-value pairs to each metric collected from cluster-03 like this:

      node_vol_cifs_write_data{org=\"meg\",ns=\"rtp\",datacenter=\"DC-01\",cluster=\"cluster-03\",node=\"umeng-aff300-05\"} 10\n

      Keep in mind that each unique combination of key-value pairs increases the amount of stored data. Use them sparingly. See PrometheusNaming for details.

      "},{"location":"configure-harvest-basic/#authentication","title":"Authentication","text":"

      When authenticating with ONTAP and StorageGRID clusters, Harvest supports both client certificates and basic authentication.

      These methods of authentication are defined in the Pollers or Defaults section of your harvest.yml using one or more of the following parameters.
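
      For example, the following sketch shows one poller using basic_auth and another using certificate_auth (addresses, credentials, and certificate paths are placeholders):

      Pollers:\n  cluster-basic:\n    addr: 10.0.1.1\n    auth_style: basic_auth\n    username: harvest\n    password: \"my-password\"\n  cluster-cert:\n    addr: 10.0.1.2\n    auth_style: certificate_auth\n    ssl_cert: cert/cluster-cert.pem\n    ssl_key: cert/cluster-cert.key\n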

      parameter description default Link auth_style One of basic_auth or certificate_auth Optional when using credentials_file or credentials_script basic_auth link username Username used for authenticating to the remote system link password Password used for authenticating to the remote system link credentials_file Relative or absolute path to a yaml file that contains cluster credentials link credentials_script External script Harvest executes to retrieve credentials link"},{"location":"configure-harvest-basic/#precedence","title":"Precedence","text":"

      When multiple authentication parameters are defined at the same time, Harvest tries each method listed below, in the following order, to resolve authentication requests. The first method that returns a non-empty password stops the search.

      When these parameters exist in both the Pollers and Defaults section, the Pollers section will be consulted before the Defaults.

      section parameter Pollers auth_style: certificate_auth Pollers auth_style: basic_auth with username and password Pollers credentials_script Pollers credentials_file Defaults auth_style: certificate_auth Defaults auth_style: basic_auth with username and password Defaults credentials_script Defaults credentials_file"},{"location":"configure-harvest-basic/#credentials-file","title":"Credentials File","text":"

      If you would rather not list cluster credentials in your harvest.yml, you can use the credentials_file section in your harvest.yml to point to a file that contains the credentials. At runtime, the credentials_file will be read and the included credentials will be used to authenticate with the matching cluster(s).

      This is handy when integrating with 3rd party credential stores. See #884 for examples.

      The format of the credentials_file is similar to harvest.yml and can contain multiple cluster credentials.

      Example:

      Snippet from harvest.yml:

      Pollers:\n  cluster1:\n    addr: 10.193.48.11\n    credentials_file: secrets/cluster1.yml\n    exporters:\n      - prom1 \n

      File secrets/cluster1.yml:

      Pollers:\n  cluster1:\n    username: harvest\n    password: foo\n
      "},{"location":"configure-harvest-basic/#credentials-script","title":"Credentials Script","text":"

      The credentials_script feature allows you to fetch authentication information via an external script. This can be configured in the Pollers section of your harvest.yml file, as shown in the example below.

      At runtime, Harvest will invoke the script specified by the credentials_script section's path parameter. Harvest will call the script with one or two arguments depending on how your poller is configured in the harvest.yml file. The script will be called like this: ./script $addr or ./script $addr $username.

      • The first argument $addr is the address of the cluster taken from the addr field under the Pollers section of your harvest.yml file.
      • The second argument $username is the username for the cluster taken from the username field under the Pollers section of your harvest.yml file. If your harvest.yml does not include a username, nothing will be passed.

      The script should communicate the credentials to Harvest by writing the response to its standard output (stdout). Harvest supports two output formats from the script:

      1. YAML format: If the script outputs a YAML object with username and password keys, Harvest will use both the username and password from the output. For example, if the script writes the following, Harvest will use myuser and mypassword for the poller's credentials.

        username: myuser\npassword: mypassword\n
        If only the password is provided, Harvest will use the username from the harvest.yml file, if available. If your username or password contains spaces, #, or other characters with special meaning in YAML, make sure you quote the value like so: password: \"my password with spaces\"

        If the script outputs a YAML object containing an authToken, Harvest will use this authToken when communicating with ONTAP or StorageGRID clusters. Harvest will include the authToken in the HTTP request's authorization header using the Bearer authentication scheme.

        authToken: eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJEcEVkRmgyODlaTXpYR25OekFvaWhTZ0FaUnBtVlVZSDJ3R3dXb0VIWVE0In0.eyJleHAiOjE3MjE4Mj\n

      2. Plain text format: If the script outputs plain text, Harvest will use the output as the password. The username will be taken from the harvest.yml file, if available. For example, if the script writes the following to its stdout, Harvest will use the username defined in that poller's section of the harvest.yml and mypassword for the poller's credentials.

        mypassword\n

      If the script doesn't finish within the specified timeout, Harvest will terminate the script and any spawned processes.

      Credential scripts are defined under the credentials_script section within Pollers in your harvest.yml. Below are the options for the credentials_script section:

      parameter type description default path string Absolute path to the script that takes one or two arguments: addr and, optionally, username, in that order. schedule go duration or always Schedule for calling the authentication script. If set to always, the script is called every time a password is requested; otherwise, the previously cached value is used. 24h timeout go duration Maximum time Harvest will wait for the script to finish before terminating it and its descendants. 10s"},{"location":"configure-harvest-basic/#example","title":"Example","text":"

      Here is an example of how to configure the credentials_script in the harvest.yml file:

      Pollers:\n  ontap1:\n    datacenter: rtp\n    addr: 10.1.1.1\n    username: admin # Optional: if not provided, the script must return the username\n    collectors:\n      - Rest\n      - RestPerf\n    credentials_script:\n      path: ./get_credentials\n      schedule: 3h\n      timeout: 10s\n

      In this example, the get_credentials script should be located in the same directory as the harvest.yml file and should be executable. It should output the credentials in either YAML or plain text format. Here are three example scripts:

      get_credentials that outputs username and password in YAML format:

      #!/bin/bash\ncat << EOF\nusername: myuser\npassword: mypassword\nEOF\n

      get_credentials that outputs authToken in YAML format:

      #!/bin/bash\n# script requests an access token from the authorization server\n# authorization returns an access token to the script\n# script writes the YAML formatted authToken like so:\ncat << EOF\nauthToken: $authToken\nEOF\n

      Below are a couple of OAuth2 credential script examples for authenticating with ONTAP or StorageGRID OAuth2-enabled clusters.

      These are examples that you will need to adapt to your environment.

      Example OAuth2 script authenticating with the Keycloak auth provider via curl. Uses jq to extract the token. This script outputs the authToken in YAML format.

      #!/bin/bash\n\nresponse=$(curl --silent \"http://{KEYCLOAK_IP:PORT}/realms/{REALM_NAME}/protocol/openid-connect/token\" \\\n  --header \"Content-Type: application/x-www-form-urlencoded\" \\\n  --data-urlencode \"grant_type=password\" \\\n  --data-urlencode \"username={USERNAME}\" \\\n  --data-urlencode \"password={PASSWORD}\" \\\n  --data-urlencode \"client_id={CLIENT_ID}\" \\\n  --data-urlencode \"client_secret={CLIENT_SECRET}\")\n\naccess_token=$(echo \"$response\" | jq -r '.access_token')\n\ncat << EOF\nauthToken: $access_token\nEOF\n

      Example OAuth2 script authenticating with the Auth0 auth provider via curl. Uses jq to extract the token. This script outputs the authToken in YAML format.

      #!/bin/bash\nresponse=$(curl --silent https://{AUTH0_TENANT_URL}/oauth/token \\\n  --header 'content-type: application/json' \\\n  --data '{\"client_id\":\"{CLIENT_ID}\",\"client_secret\":\"{CLIENT_SECRET}\",\"audience\":\"{ONTAP_CLUSTER_IP}\",\"grant_type\":\"client_credentials\"}')\n\naccess_token=$(echo \"$response\" | jq -r '.access_token')\n\ncat << EOF\nauthToken: $access_token\nEOF\n

      get_credentials that outputs only the password in plain text format:

      #!/bin/bash\necho \"mypassword\"\n

      "},{"location":"configure-harvest-basic/#troubleshooting","title":"Troubleshooting","text":"
      • Make sure your script is executable
      • Ensure the user/group that executes your poller also has read and execute permissions on the script. su as the user/group that runs Harvest and make sure you can execute the script too.
      "},{"location":"configure-rest/","title":"REST","text":""},{"location":"configure-rest/#rest-collector","title":"Rest Collector","text":"

      The Rest collector uses the REST protocol to collect data from ONTAP systems.

      The RestPerf collector is an extension of this collector, therefore they share many parameters and configuration settings.

      "},{"location":"configure-rest/#target-system","title":"Target System","text":"

      The target system can be any cDot ONTAP system. ONTAP 9.12.1 and later are supported; however, the default configuration files may not completely match all versions. See REST Strategy for more details.

      "},{"location":"configure-rest/#requirements","title":"Requirements","text":"

      No SDK or other requirements. It is recommended to create a read-only user for Harvest on the ONTAP system (see prepare monitored clusters for details).

      "},{"location":"configure-rest/#metrics","title":"Metrics","text":"

      The collector collects a dynamic set of metrics. ONTAP returns JSON documents and Harvest allows you to define templates to extract values from the JSON document via a dot notation path. You can view ONTAP's full set of REST APIs by visiting https://docs.netapp.com/us-en/ontap-automation/reference/api_reference.html#access-a-copy-of-the-ontap-rest-api-reference-documentation

      As an example, the /api/storage/aggregates endpoint lists all data aggregates in the cluster. Below is an example response from this endpoint:

      {\n  \"records\": [\n    {\n      \"uuid\": \"3e59547d-298a-4967-bd0f-8ae96cead08c\",\n      \"name\": \"umeng_aff300_aggr2\",\n      \"space\": {\n        \"block_storage\": {\n          \"size\": 8117898706944,\n          \"available\": 4889853616128\n        }\n      },\n      \"state\": \"online\",\n      \"volume_count\": 36\n    }\n  ]\n}\n

      The Rest collector will take this document, extract the records section and convert the metrics above into: name, space.block_storage.size, space.block_storage.available, state and volume_count. Metric names will be taken, as is, unless you specify a short display name. See counters for more details.
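
      For instance, a minimal counters sketch for this aggregate example could look like the following (the display names after => and the object short name are arbitrary choices for this illustration):

      name:    Aggregate\nquery:   api/storage/aggregates\nobject:  aggr\n\ncounters:\n  - ^^uuid\n  - ^name                          => aggr\n  - ^state                         => state\n  - space.block_storage.size       => space_total\n  - space.block_storage.available  => space_available\n  - volume_count\n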

      "},{"location":"configure-rest/#parameters","title":"Parameters","text":"

      The parameters of the collector are distributed across three files:

      • Harvest configuration file (default: harvest.yml)
      • Rest configuration file (default: conf/rest/default.yaml)
      • Each object has its own configuration file (located in conf/rest/$version/)

      Except for addr and datacenter, all other parameters of the Rest collector can be defined in any of these three files. Parameters defined in a lower-level file override parameters in the higher-level ones. This allows you to configure each object individually or use the same parameters for all objects.

      The full set of parameters is described below.

      "},{"location":"configure-rest/#collector-configuration-file","title":"Collector configuration file","text":"

      This configuration file contains a list of objects that should be collected and the filenames of their templates (explained in the next section).

      Additionally, this file contains the parameters that are applied as defaults to all objects. As mentioned before, any of these parameters can be defined in the Harvest or object configuration files as well.

      parameter type description default client_timeout duration (Go-syntax) how long to wait for server responses 30s jitter duration (Go-syntax), optional Each Harvest collector runs independently, which means that at startup, each collector may send its REST queries at nearly the same time. To spread out the collector startup times over a broader period, you can use jitter to randomly distribute collector startup across a specified duration. For example, a jitter of 1m starts each collector after a random delay between 0 and 60 seconds. For more details, refer to this discussion. schedule list, required how frequently to retrieve metrics from ONTAP - data duration (Go-syntax) how frequently this collector/object should retrieve metrics from ONTAP 3 minutes
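
      A sketch of these collector-level parameters in conf/rest/default.yaml could look like this (the values shown are the documented defaults; jitter is optional and included only for illustration):

      client_timeout: 30s\njitter: 1m\n\nschedule:\n  - data: 3m\n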

      The template should define objects in the objects section. Example:

      objects:\n  Aggregate: aggr.yaml\n

      For each object, we define the filename of the object configuration file. The object configuration files are located in subdirectories matching the ONTAP version that was used to create these files. It is possible to have multiple version-subdirectories for multiple ONTAP versions. At runtime, the collector will select the object configuration file that most closely matches the version of the target ONTAP system.

      "},{"location":"configure-rest/#object-configuration-file","title":"Object configuration file","text":"

      The Object configuration file (\"subtemplate\") should contain the following parameters:

      parameter type description default name string, required display name of the collector that will collect this object query string, required REST endpoint used to issue a REST request object string, required short name of the object counters string list of counters to collect (see notes below) plugins list plugins and their parameters to run on the collected data export_options list parameters to pass to exporters (see notes below)"},{"location":"configure-rest/#template-example","title":"Template Example:","text":"
      name:                     Volume\nquery:                    api/storage/volumes\nobject:                   volume\n\ncounters:\n  - ^^name                                        => volume\n  - ^^svm.name                                    => svm\n  - ^aggregates.#.name                            => aggr\n  - ^anti_ransomware.state                        => antiRansomwareState\n  - ^state                                        => state\n  - ^style                                        => style\n  - space.available                               => size_available\n  - space.overwrite_reserve                       => overwrite_reserve_total\n  - space.overwrite_reserve_used                  => overwrite_reserve_used\n  - space.percent_used                            => size_used_percent\n  - space.physical_used                           => space_physical_used\n  - space.physical_used_percent                   => space_physical_used_percent\n  - space.size                                    => size\n  - space.used                                    => size_used\n  - hidden_fields:\n      - anti_ransomware.state\n      - space\n  - filter:\n      - name=*harvest*\n\nplugins:\n  - LabelAgent:\n      exclude_equals:\n        - style `flexgroup_constituent`\n\nexport_options:\n  instance_keys:\n    - aggr\n    - style\n    - svm\n    - volume\n  instance_labels:\n    - antiRansomwareState\n    - state\n
      "},{"location":"configure-rest/#counters","title":"Counters","text":"

      This section defines the list of counters that will be collected. These counters can be labels, numeric metrics or histograms. The exact property of each counter is fetched from ONTAP and updated periodically.

      The display name of a counter can be changed with => (e.g., space.block_storage.size => space_total).

      Counters that are stored as labels will only be exported if they are included in the export_options section.

      The counters section allows you to specify hidden_fields and filter parameters. Please find the detailed explanation below.

      "},{"location":"configure-rest/#hidden_fields","title":"Hidden_fields","text":"

      There are some fields that ONTAP will not return unless you explicitly ask for them, even when using the URL parameter fields=**. hidden_fields is how you tell ONTAP which additional fields it should include in the REST response.
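
      For example, the volume template shown earlier uses hidden_fields to ask ONTAP to include the anti_ransomware.state and space fields in the response; an abridged sketch of that counters section looks like this:

      counters:\n  - ^anti_ransomware.state        => antiRansomwareState\n  - space.physical_used           => space_physical_used\n  - hidden_fields:\n      - anti_ransomware.state\n      - space\n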

      "},{"location":"configure-rest/#filter","title":"Filter","text":"

      The filter is used to constrain the data returned by the endpoint, allowing for more targeted data retrieval. The filtering uses ONTAP's REST record filtering. The example above asks ONTAP to only return records where a volume's name matches *harvest*.

      If you're familiar with ONTAP's REST record filtering, the example above would become name=*harvest* and be appended to the final URL like so:

      https://CLUSTER_IP/api/storage/volumes?fields=*,anti_ransomware.state,space&name=*harvest*\n

      Refer to the ONTAP API specification, sections: query parameters and record filtering, for more details.

      "},{"location":"configure-rest/#export_options","title":"Export_options","text":"

      Parameters in this section tell the exporters how to handle the collected data.

      There are two different kinds of time-series that Harvest publishes: metrics and instance labels.

      • Metrics are numeric data with associated labels (key-value pairs). E.g. volume_read_ops_total{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\"} 123. The volume_read_ops_total metric is exporting three labels: cluster, node, and volume and the metric value is 123.
      • Instance labels are named after their associated config object (e.g., volume_labels, qtree_labels, etc.). There will be one instance label for each object instance, and each instance label will contain a set of associated labels (key-value pairs) that are defined in the templates instance_labels parameter. E.g. volume_labels{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\", svm=\"svm1\"} 1. The volume_labels instance label is exporting four labels: cluster, node, volume, and svm. Instance labels always export a metric value of 1.

      The export_options section allows you to define how to export these time-series.

      • instance_keys (list): display names of labels to export to both metric and instance labels. For example, if you list the svm counter under instance_keys, that key-value will be included in all time-series metrics and all instance-labels.
      • instance_labels (list): display names of labels to export with the corresponding instance label config object. For example, if you want the volume counter to be exported with the volume_labels instance label, you would list volume in the instance_labels section.
      • include_all_labels (bool): exports all labels for all time-series metrics. If there are no metrics defined in the template, this option will do nothing. This option also overrides the previous two parameters. See also collect_only_labels.
      "},{"location":"configure-rest/#endpoints","title":"Endpoints","text":"

      In Harvest REST templates, endpoints are additional queries that enhance the data collected from the main query. The main query, identified by the query parameter, is the primary REST API for data collection. For example, the main query for a disk object is api/storage/disks. Typically endpoints are used to query the private CLI to add metrics that are not available via ONTAP's public REST API.

      Within the endpoints section of a Harvest REST template, you can define multiple endpoint entries. Each entry supports its own query and associated counters, allowing you to collect additional metrics or labels from various APIs. These additional metrics or labels are associated with the main dataset via a key. The key is denoted by the ^^ notation in the counters of both the main query and the endpoints.

      In the example below, the endpoints section makes an additional query to api/private/cli/disk, which collects metrics such as stats_io_kbps, stats_sectors_read, and stats_sectors_written. The uuid is the key that links the data from the api/storage/disks and api/private/cli/disk APIs. The type label from the api/private/cli/disk endpoint is included as outlined in the export_options.

      name:             Disk\nquery:            api/storage/disks\nobject:           disk\n\ncounters:\n  - ^^uid                       => uuid\n  - ^bay                        => shelf_bay\n  - ^container_type\n  - ^home_node.name             => owner_node\n  - ^model\n  - ^name                       => disk\n  - ^node.name                  => node\n  - ^node.uuid\n  - ^outage.reason              => outage\n  - ^serial_number\n  - ^shelf.uid                  => shelf\n  - ^state\n  - bytes_per_sector            => bytes_per_sector\n  - sector_count                => sectors\n  - stats.average_latency       => stats_average_latency\n  - stats.power_on_hours        => power_on_hours\n  - usable_size\n\nendpoints:\n  - query: api/private/cli/disk\n    counters:\n      - ^^uid                   => uuid\n      - ^type\n      - disk_io_kbps_total      => stats_io_kbps\n      - sectors_read            => stats_sectors_read\n      - sectors_written         => stats_sectors_written\n\nplugins:\n  - Disk\n  - LabelAgent:\n      value_to_num:\n        - new_status outage - - `0` #ok_value is empty value, '-' would be converted to blank while processing.\n      join:\n        - index `_` node,disk\n  - MetricAgent:\n      compute_metric:\n        - uptime MULTIPLY stats.power_on_hours 60 60 #convert to second for zapi parity\n\nexport_options:\n  instance_keys:\n    - disk\n    - index\n    - node\n  instance_labels:\n    - container_type\n    - failed\n    - model\n    - outage\n    - owner_node\n    - serial_number\n    - shared\n    - shelf\n    - shelf_bay\n    - type\n
      "},{"location":"configure-rest/#restperf-collector","title":"RestPerf Collector","text":"

      RestPerf collects performance metrics from ONTAP systems using the REST protocol. The collector is designed to be easily extendable to collect new objects or to collect additional counters from already configured objects.

      This collector is an extension of the Rest collector. The major difference between them is that RestPerf collects only the performance (perf) APIs. Additionally, RestPerf always calculates final values from the deltas of two subsequent polls.
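
      As a simplified illustration (ignoring base counters and property-specific post-processing), if a cumulative counter reports 1,000 at one poll and 7,000 at the next poll 60 seconds later, the exported rate would be (7,000 - 1,000) / 60 = 100 per second.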

      "},{"location":"configure-rest/#metrics_1","title":"Metrics","text":"

      RestPerf metrics are calculated in the same way as ZapiPerf metrics. More details about how performance metrics are calculated can be found here.

      "},{"location":"configure-rest/#parameters_1","title":"Parameters","text":"

      The parameters of the collector are distributed across three files:

      • Harvest configuration file (default: harvest.yml)
      • RestPerf configuration file (default: conf/restperf/default.yaml)
      • Each object has its own configuration file (located in conf/restperf/$version/)

      Except for addr, datacenter and auth_style, all other parameters of the RestPerf collector can be defined in any of these three files. Parameters defined in a lower-level file override parameters in the higher-level file. This allows the user to configure each object individually or use the same parameters for all objects.

      The full set of parameters is described below.

      "},{"location":"configure-rest/#restperf-configuration-file","title":"RestPerf configuration file","text":"

      This configuration file (the \"template\") contains a list of objects that should be collected and the filenames of their configuration (explained in the next section).

      Additionally, this file contains the parameters that are applied as defaults to all objects. (As mentioned before, any of these parameters can be defined in the Harvest or object configuration files as well).

      parameter type description default use_insecure_tls bool, optional skip verifying TLS certificate of the target system false client_timeout duration (Go-syntax) how long to wait for server responses 30s latency_io_reqd int, optional threshold of IOPs for calculating latency metrics (latencies based on very few IOPs are unreliable) 10 jitter duration (Go-syntax), optional Each Harvest collector runs independently, which means that at startup, each collector may send its REST queries at nearly the same time. To spread out the collector startup times over a broader period, you can use jitter to randomly distribute collector startup across a specified duration. For example, a jitter of 1m starts each collector after a random delay between 0 and 60 seconds. For more details, refer to this discussion. schedule list, required the poll frequencies of the collector/object, should include exactly these three elements in the exact same order (a sketch follows the note below): - counter duration (Go-syntax) poll frequency of updating the counter metadata cache 20 minutes - instance duration (Go-syntax) poll frequency of updating the instance cache 10 minutes - data duration (Go-syntax) poll frequency of updating the data cache Note Harvest allows defining poll intervals on sub-second level (e.g. 1ms), however keep in mind the following:
      • API response of an ONTAP system can take several seconds, so the collector is likely to enter failed state if the poll interval is less than client_timeout.
      • Small poll intervals will create significant workload on the ONTAP system, as many counters are aggregated on-demand.
      • Some metric values become less significant if they are calculated for very short intervals (e.g. latencies)
      1 minute
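
      A sketch of these collector-level parameters could look like the following (a hypothetical layout using the documented defaults; check the shipped conf/restperf/default.yaml for the authoritative format):

      use_insecure_tls: false\nclient_timeout: 30s\nlatency_io_reqd: 10\n\nschedule:\n  - counter: 20m\n  - instance: 10m\n  - data: 1m\n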

      The template should define objects in the objects section. Example:

      objects:\n  SystemNode: system_node.yaml\n  HostAdapter: hostadapter.yaml\n

      Note that for each object we only define the filename of the object configuration file. The object configuration files are located in subdirectories matching the ONTAP version that was used to create these files. It is possible to have multiple version-subdirectories for multiple ONTAP versions. At runtime, the collector will select the object configuration file that most closely matches the version of the target ONTAP system. (A mismatch is tolerated since RestPerf will fetch and validate counter metadata from the system.)

      "},{"location":"configure-rest/#object-configuration-file_1","title":"Object configuration file","text":"

      Refer to Object configuration file.

      "},{"location":"configure-rest/#counters_1","title":"Counters","text":"

      See Counters

      Some counters require a \"base-counter\" for post-processing. If the base-counter is missing, RestPerf will still run, but the missing data won't be exported.

      "},{"location":"configure-rest/#export_options_1","title":"Export_options","text":"

      See Export Options

      "},{"location":"configure-rest/#ontap-private-cli","title":"ONTAP Private CLI","text":"

      The ONTAP private CLI allows for more granular control and access to non-public counters. It can be used to fill gaps in the REST API, especially in cases where certain data is not yet available through the REST API. Harvest's REST collector can make full use of ONTAP's private CLI. This means when ONTAP's public REST API is missing counters, Harvest can still collect them as long as those counters are available via ONTAP's CLI.

      For more information on using the ONTAP private CLI with the REST API, you can refer to the following resources:

      • NetApp Documentation: Accessing ONTAP CLI through REST APIs
      • NetApp Blog: Private CLI Passthrough with ONTAP REST API
      "},{"location":"configure-rest/#creating-templates-that-use-ontaps-private-cli","title":"Creating Templates That Use ONTAP's Private CLI","text":"

      Let's take an example of how we can make Harvest use the system fru-check show CLI command.

      system fru-check show\n

      REST APIs endpoint:

      /api/private/cli/system/fru-check?fields=node,fru_name,fru_status\n

      Converting the CLI command system fru-check show for use with a private CLI REST API can be achieved by adhering to the path rules outlined in the ONTAP documentation. Generally, this involves substituting all spaces within the CLI command with a forward slash (/), and converting the ONTAP CLI verb into the corresponding REST verb.

      The show command is converted to the HTTP GET method. From the CLI, look at the required field names and pass them as a comma-separated list in fields= in the API endpoint.

      Note: If the field name contains a hyphen (-), it should be converted to an underscore (_) in the REST API field. For example, fru-name becomes fru_name. ONTAP is flexible with the input format and can freely convert between hyphen (-) and underscore (_) forms. However, when it comes to output, ONTAP returns field names with underscores. For compatibility and consistency, it is mandatory to use underscores in field names when working with Harvest REST templates for ONTAP private CLI.

      "},{"location":"configure-rest/#advanced-and-diagnostic-mode-commands","title":"Advanced and Diagnostic Mode Commands","text":"

      The CLI passthrough allows you to execute advanced and diagnostic mode CLI commands by including the privilege_level field in your request under the filter setting, like so:

      counters:\n  - filter:\n      - privilege_level=diagnostic\n

      "},{"location":"configure-rest/#creating-a-harvest-template-for-private-cli","title":"Creating a Harvest Template for Private CLI","text":"

      Here's a Harvest template that uses ONTAP's private CLI to collect field-replaceable unit (FRU) counters via the CLI command system fru-check show.

      name:                         FruCheck\nquery:                        api/private/cli/system/fru-check\nobject:                       fru_check\n\ncounters:\n  - ^^node\n  - ^^serial_number              => serial_number\n  - ^fru_name                    => name\n  - ^fru_status                  => status\n\nexport_options:\n  instance_keys:\n    - node\n    - serial_number\n  instance_labels:\n    - name\n    - status\n

      In this template, the query field specifies the private CLI command to be used (system fru-check show). The counters field maps the output of the private CLI command to the fields of the fru_check object. To identify the ONTAP counter names (the left side of the '=>' symbol in the template, such as fru_name), you can establish an SSH connection to your ONTAP cluster. Once connected, leverage ONTAP's command completion functionality to reveal the counter names. For instance, you can type system fru-check show -fields, then press the '?' key. This will display a list of ONTAP field names, as demonstrated below.

      cluster-01::> system fru-check show -fields ?\n  node                        Node\n  serial-number               FRU Serial Number\n  fru-name                    FRU Name\n  fru-type                    FRU Type\n  fru-status                  Status\n  display-name                Display Name\n  location                    Location\n  additional-info             Additional Info\n  reason                      Details\n

      The export_options field specifies how the data should be exported. The instance_keys field lists the fields that will be added as labels to all exported instances of the fru_check object. The instance_labels field lists the fields that should be included as labels in the exported data.

      The output of this template would look like:

      fru_check_labels{cluster=\"umeng-aff300-01-02\",datacenter=\"u2\",name=\"DIMM-1\",node=\"umeng-aff300-02\",serial_number=\"s2\",status=\"pass\"} 1.0\nfru_check_labels{cluster=\"umeng-aff300-01-02\",datacenter=\"u2\",name=\"PCIe Devices\",node=\"umeng-aff300-02\",serial_number=\"s1\",status=\"pass\"} 1.0\n
      "},{"location":"configure-rest/#partial-aggregation","title":"Partial Aggregation","text":"

      There are instances when ONTAP may report partial aggregate results for certain objects (for example, during a node outage). In such cases, the RestPerf Collector will skip the reporting of performance counters for the affected objects.

      To determine whether partial aggregation affects an object, check the numPartials entry in the Harvest logs. If numPartials is greater than zero, it indicates that partial aggregations have occurred for that object. e.g. Collected Poller=aff-251 collector=RestPerf:NFSv4 instances=56 numPartials=15

      "},{"location":"configure-storagegrid/","title":"StorageGRID","text":""},{"location":"configure-storagegrid/#storagegrid-collector","title":"StorageGRID Collector","text":"

      The StorageGRID collector uses REST calls to collect data from StorageGRID systems.

      "},{"location":"configure-storagegrid/#target-system","title":"Target System","text":"

      All StorageGRID versions are supported; however, the default configuration files may not completely match older systems.

      "},{"location":"configure-storagegrid/#requirements","title":"Requirements","text":"

      No SDK or other requirements. It is recommended to create a read-only user for Harvest on the StorageGRID system (see prepare monitored clusters for details).

      "},{"location":"configure-storagegrid/#metrics","title":"Metrics","text":"

      The collector collects a dynamic set of metrics via StorageGRID's REST API. StorageGRID returns JSON documents and Harvest allows you to define templates to extract values from the JSON document via a dot notation path. You can view StorageGRID's full set of REST APIs by visiting https://$STORAGE_GRID_HOSTNAME/grid/apidocs.html

      As an example, the /grid/accounts-cache endpoint lists the tenant accounts in the cache and includes additional information, such as objectCount and dataBytes. Below is an example response from this endpoint:

      {\n  \"data\": [\n    {\n      \"id\": \"95245224059574669217\",\n      \"name\": \"foople\",\n      \"policy\": {\n        \"quotaObjectBytes\": 50000000000\n      },\n      \"objectCount\": 6,\n      \"dataBytes\": 10473454261\n    }\n  ]\n}\n

      The StorageGRID collector will take this document, extract the data section and convert the metrics above into: name, policy.quotaObjectBytes, objectCount, and dataBytes. Metric names will be taken, as is, unless you specify a short display name. See counters for more details.
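
      For instance, a minimal counters sketch for this tenant-accounts example could look like the following (the query, object name, and display names are arbitrary choices for this illustration):

      name:    Tenant\nquery:   grid/accounts-cache\nobject:  tenant\n\ncounters:\n  - ^^id\n  - ^name                      => tenant\n  - policy.quotaObjectBytes    => logical_quota\n  - objectCount                => objects\n  - dataBytes                  => bytes\n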

      "},{"location":"configure-storagegrid/#parameters","title":"Parameters","text":"

      The parameters of the collector are distributed across three files:

      • Harvest configuration file (default: harvest.yml)
      • StorageGRID configuration file (default: conf/storagegrid/default.yaml)
      • Each object has its own configuration file (located in conf/storagegrid/$version/)

      Except for addr and datacenter, all other parameters of the StorageGRID collector can be defined in any of these three files. Parameters defined in a lower-level file override parameters in the higher-level ones. This allows you to configure each object individually or use the same parameters for all objects.

      The full set of parameters is described below.

      "},{"location":"configure-storagegrid/#harvest-configuration-file","title":"Harvest configuration file","text":"

      Parameters in the poller section should define the following required parameters.

      parameter type description default Poller name (header) string, required Poller name, user-defined value addr string, required IPv4, IPv6 or FQDN of the target system datacenter string, required Datacenter name, user-defined value username, password string, required StorageGRID username and password with at least Tenant accounts permissions collectors list, required Name of collector to run for this poller, use StorageGrid for this collector"},{"location":"configure-storagegrid/#storagegrid-configuration-file","title":"StorageGRID configuration file","text":"

      This configuration file contains a list of objects that should be collected and the filenames of their templates (explained in the next section).

      Additionally, this file contains the parameters that are applied as defaults to all objects. As mentioned before, any of these parameters can be defined in the Harvest or object configuration files as well.

      parameter type description default client_timeout duration (Go-syntax) how long to wait for server responses 30s schedule list, required how frequently to retrieve metrics from StorageGRID - data duration (Go-syntax) how frequently this collector/object should retrieve metrics from StorageGRID 5 minutes only_cluster_instance bool, optional don't require instance key. assume the only instance is the cluster itself

      The template should define objects in the objects section. Example:

      objects:\n  Tenant: tenant.yaml\n

      For each object, we define the filename of the object configuration file. The object configuration files are located in subdirectories matching the StorageGRID version that was used to create these files. It is possible to have multiple version-subdirectories for multiple StorageGRID versions. At runtime, the collector will select the object configuration file that most closely matches the version of the target StorageGRID system.

      "},{"location":"configure-storagegrid/#object-configuration-file","title":"Object configuration file","text":"

      The Object configuration file (\"subtemplate\") should contain the following parameters:

      parameter type description default name string, required display name of the collector that will collect this object query string, required REST endpoint used to issue a REST request object string, required short name of the object api string StorageGRID REST endpoint version to use, overrides default management API version 3 counters list list of counters to collect (see notes below) plugins list plugins and their parameters to run on the collected data export_options list parameters to pass to exporters (see notes below)"},{"location":"configure-storagegrid/#counters","title":"Counters","text":"

      This section defines the list of counters that will be collected. These counters can be labels, numeric metrics or histograms. The exact property of each counter is fetched from StorageGRID and updated periodically.

      The display name of a counter can be changed with => (e.g., policy.quotaObjectBytes => logical_quota).

      Counters that are stored as labels will only be exported if they are included in the export_options section.

      "},{"location":"configure-storagegrid/#export_options","title":"Export_options","text":"

      Parameters in this section tell the exporters how to handle the collected data.

      There are two different kinds of time-series that Harvest publishes: metrics and instance labels.

      • Metrics are numeric data with associated labels (key-value pairs). E.g. volume_read_ops_total{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\"} 123. The volume_read_ops_total metric is exporting three labels: cluster, node, and volume and the metric value is 123.
      • Instance labels are named after their associated config object (e.g., volume_labels, qtree_labels, etc.). There will be one instance label for each object instance, and each instance label will contain a set of associated labels (key-value pairs) that are defined in the templates instance_labels parameter. E.g. volume_labels{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\", svm=\"svm1\"} 1. The volume_labels instance label is exporting four labels: cluster, node, volume, and svm. Instance labels always export a metric value of 1.

      The export_options section allows you to define how to export these time-series.

      • instance_keys (list): display names of labels to export to both metric and instance labels. For example, if you list the svm counter under instance_keys, that key-value will be included in all time-series metrics and all instance-labels.
      • instance_labels (list): display names of labels to export with the corresponding instance label config object. For example, if you want the volume counter to be exported with the volume_labels instance label, you would list volume in the instance_labels section.
      • include_all_labels (bool): exports all labels for all time-series metrics. If there are no metrics defined in the template, this option will do nothing. This option also overrides the previous two parameters. See also collect_only_labels.
      "},{"location":"configure-templates/","title":"Templates","text":""},{"location":"configure-templates/#creatingediting-templates","title":"Creating/editing templates","text":"

      This document covers how to use Collector and Object templates to extend Harvest.

      1. How to add a new object template
      2. How to extend an existing object template

      There are a couple of ways to learn about ZAPIs and their attributes:

      • ONTAP's documentation
      • Using Harvest's zapi tool to explore available APIs and metrics on your cluster. Examples:
      $ harvest zapi --poller <poller> show apis\n  # will print list of apis that are available\n  # usually apis with the \"get-iter\" suffix can provide useful metrics\n$ harvest zapi --poller <poller> show attrs --api volume-get-iter\n  # will print the attribute tree of the API\n$ harvest zapi --poller <poller> show data --api volume-get-iter\n  # will print raw data of the API attribute tree\n

      (Replace <poller> with the name of a poller that can connect to an ONTAP system.)

      "},{"location":"configure-templates/#conf-path","title":"Conf Path","text":"

      The conf path is the colon-separated list of directories that Harvest searches to load templates. Harvest walks each directory in order, stopping at the first one that contains the desired template. The default value of confpath is conf, meaning that only the conf directory is searched for templates.

      There are two ways to change the conf path.

      • You can specify the -confpath command line argument to bin/harvest or bin/poller, e.g. -confpath customconf:conf. Harvest will search the customconf directory followed by the conf directory.

      • You can specify the conf_path parameter in the Pollers section of your harvest.yml file, e.g.

      Pollers:\n  netapp-cluster1: \n    datacenter: dc-1\n    addr: 10.193.48.163\n    conf_path: customconf:/etc/harvest/conf:conf\n

      This conf_path example will search for templates in this order, stopping at the first one that contains the template.

      1. local directory customconf
      2. absolute directory /etc/harvest/conf
      3. local directory conf

      Use the conf path to isolate your edits and extensions to Harvest's builtin templates. This ensures that your customizations won't be affected when you upgrade Harvest.

      When using a custom confpath, make sure your custom directories have the same structure as the default conf directory. In the example below, four template modifications have been set up in the /etc/harvest/customconf directory.

      The poller's conf_path parameter is set to /etc/harvest/customconf:conf to use these modified templates. Harvest will use the custom templates when they match and the default templates otherwise.

      See issue #2330 for more examples.

      # tree /etc/harvest/customconf\n\n/etc/harvest/customconf\n\u251c\u2500\u2500 rest\n\u2502   \u251c\u2500\u2500 9.12.0\n\u2502   \u2502 \u251c\u2500\u2500 aggr.yaml\n\u2502   \u2502 \u2514\u2500\u2500 volume.yaml\n\u251c\u2500\u2500 restperf\n\u2502   \u251c\u2500\u2500 9.13.0\n\u2502   \u2502 \u2514\u2500\u2500 qtree.yaml\n\u251c\u2500\u2500 zapi\n\u2514\u2500\u2500 zapiperf\n    \u251c\u2500\u2500 cdot\n    \u2502 \u2514\u2500\u2500 9.8.0\n    \u2502     \u2514\u2500\u2500 qtree.yaml\n
      "},{"location":"configure-templates/#collector-templates","title":"Collector templates","text":"

      Collector templates define which set of objects Harvest should collect from the system being monitored. In your harvest.yml configuration file, when you say that you want to use a Zapi collector, that collector will read the matching conf/zapi/default.yaml; the same applies to ZapiPerf, which reads the conf/zapiperf/default.yaml file. Below is a snippet from conf/zapi/default.yaml. Each object is mapped to a corresponding object template file. For example, the Node object searches for the most appropriate version of the node.yaml file in the conf/zapi/cdot/** directory.

      collector:          Zapi\nobjects:\n  Node:             node.yaml\n  Aggregate:        aggr.yaml\n  Volume:           volume.yaml\n  Disk:             disk.yaml\n

      Each collector will also check if a matching file named custom.yaml exists, and if it does, it will read that file and merge it with default.yaml. The custom.yaml file should be located beside the matching default.yaml file (e.g., conf/zapi/custom.yaml is beside conf/zapi/default.yaml).

      Let's take a look at some examples.

      1. Define a poller that uses the default Zapi collector. Using the default template is the easiest and most used option.
      Pollers:\n  jamaica:\n    datacenter: munich\n    addr: 10.10.10.10\n    collectors:\n      - Zapi # will use conf/zapi/default.yaml and optionally merge with conf/zapi/custom.yaml\n
      2. Define a poller that uses the ZapiPerf collector with a custom template file:
      Pollers:\n  jamaica:\n    datacenter: munich\n    addr: 10.10.10.10\n    collectors:\n      - ZapiPerf:\n          - limited.yaml # will use conf/zapiperf/limited.yaml\n        # more templates can be added, they will be merged\n
      "},{"location":"configure-templates/#object-templates","title":"Object Templates","text":"

      Object templates (example: conf/zapi/cdot/9.8.0/lun.yaml) describe what to collect and export. These templates are used by collectors to gather metrics and send them to your time-series db.

      Object templates are made up of the following parts:

      1. the name of the object (or resource) to collect
      2. the ZAPI or REST query used to collect the object
      3. a list of object counters to collect and how to export them

      Instead of editing one of the existing templates, it's better to extend one of them. That way, your custom template will not be overwritten when upgrading Harvest. For example, if you want to extend conf/zapi/cdot/9.8.0/aggr.yaml, first create a copy (e.g., conf/zapi/cdot/9.8.0/custom_aggr.yaml), and then tell Harvest to use your custom template by adding these lines to conf/zapi/custom.yaml:

      objects:\n  Aggregate: custom_aggr.yaml\n

      After restarting your pollers, aggr.yaml and custom_aggr.yaml will be merged.

      "},{"location":"configure-templates/#create-a-new-object-template","title":"Create a new object template","text":"

      In this example, imagine that Harvest doesn't already collect environment sensor data, and you want to collect it. Sensor data comes from the environment-sensors-get-iter ZAPI. Here are the steps to add a new object template.

      Create the file conf/zapi/cdot/9.8.0/sensor.yaml (optionally replace 9.8.0 with the earliest version of ONTAP that supports sensor data; refer to Harvest Versioned Templates for more information). Add the following content to your new sensor.yaml file.

      name: Sensor                      # this name must match the key in your custom.yaml file\nquery: environment-sensors-get-iter\nobject: sensor\n\nmetric_type: int64\n\ncounters:\n  environment-sensors-info:\n    - critical-high-threshold    => critical_high\n    - critical-low-threshold     => critical_low\n    - ^discrete-sensor-state     => discrete_state\n    - ^discrete-sensor-value     => discrete_value\n    - ^^node-name                => node\n    - ^^sensor-name              => sensor\n    - ^sensor-type               => type\n    - ^threshold-sensor-state    => threshold_state\n    - threshold-sensor-value     => threshold_value\n    - ^value-units               => unit\n    - ^warning-high-threshold    => warning_high\n    - ^warning-low-threshold     => warning_low\n\nexport_options:\n  include_all_labels: true\n
      "},{"location":"configure-templates/#enable-the-new-object-template","title":"Enable the new object template","text":"

      To enable the new sensor object template, create the conf/zapi/custom.yaml file with the lines shown below.

      objects:\n  Sensor: sensor.yaml                 # this key must match the name in your sensor.yaml file\n

      The Sensor key used in the custom.yaml must match the name defined in the sensor.yaml file. That mapping is what connects this object with its template. In the future, if you add more object templates, you can add those in your existing custom.yaml file.
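
      For example, a custom.yaml that enables the Sensor template alongside a second, hypothetical Subsystem template would look like this:

      objects:\n  Sensor:    sensor.yaml\n  Subsystem: subsystem.yaml\n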

      "},{"location":"configure-templates/#test-your-object-template-changes","title":"Test your object template changes","text":"

      Test your new Sensor template with a single poller like this:

      ./bin/harvest start <poller> --foreground --verbose --collectors Zapi --objects Sensor\n

      Replace <poller> with the name of one of your ONTAP pollers.

      Once you have confirmed that the new template works, restart any already running pollers that you want to use the new template(s).

      "},{"location":"configure-templates/#check-the-metrics","title":"Check the metrics","text":"

      If you are using the Prometheus exporter, you can scrape the poller's HTTP endpoint with curl or a web browser. E.g., my poller exports its data on port 15001. Adjust as needed for your exporter.

      curl -s 'http://localhost:15001/metrics' | grep ^sensor_  # sensor_ name matches the object: value in your sensor.yaml file.\n\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",critical_high=\"3664\",node=\"shopfloor-02\",sensor=\"P3.3V STBY\",type=\"voltage\",warning_low=\"3040\",critical_low=\"2960\",threshold_state=\"normal\",unit=\"mV\",warning_high=\"3568\"} 3280\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",sensor=\"P1.2V STBY\",type=\"voltage\",threshold_state=\"normal\",warning_high=\"1299\",warning_low=\"1105\",critical_low=\"1086\",node=\"shopfloor-02\",critical_high=\"1319\",unit=\"mV\"} 1193\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",unit=\"mV\",critical_high=\"15810\",critical_low=\"0\",node=\"shopfloor-02\",sensor=\"P12V STBY\",type=\"voltage\",threshold_state=\"normal\"} 11842\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",sensor=\"P12V STBY Curr\",type=\"current\",threshold_state=\"normal\",unit=\"mA\",critical_high=\"3182\",critical_low=\"0\",node=\"shopfloor-02\"} 748\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",critical_low=\"1470\",node=\"shopfloor-02\",sensor=\"Sysfan2 F2 Speed\",type=\"fan\",threshold_state=\"normal\",unit=\"RPM\",warning_low=\"1560\"} 2820\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",sensor=\"PSU2 Fan1 Speed\",type=\"fan\",threshold_state=\"normal\",unit=\"RPM\",warning_low=\"4600\",critical_low=\"4500\",node=\"shopfloor-01\"} 6900\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",sensor=\"PSU1 InPwr Monitor\",type=\"unknown\",threshold_state=\"normal\",unit=\"mW\",node=\"shopfloor-01\"} 132000\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",critical_high=\"58\",type=\"thermal\",unit=\"C\",warning_high=\"53\",critical_low=\"0\",node=\"shopfloor-01\",sensor=\"Bat Temp\",threshold_state=\"normal\",warning_low=\"5\"} 24\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",critical_high=\"9000\",node=\"shopfloor-01\",sensor=\"Bat Charge Volt\",type=\"voltage\",threshold_state=\"normal\",unit=\"mV\",warning_high=\"8900\"} 8200\nsensor_value{datacenter=\"WDRF\",cluster=\"shopfloor\",node=\"shopfloor-02\",sensor=\"PSU1 InPwr Monitor\",type=\"unknown\",threshold_state=\"normal\",unit=\"mW\"} 132000\n
      "},{"location":"configure-templates/#extend-an-existing-object-template","title":"Extend an existing object template","text":""},{"location":"configure-templates/#how-to-extend-a-restrestperfstoragegridems-collectors-existing-object-template","title":"How to extend a Rest/RestPerf/StorageGRID/Ems collector's existing object template","text":"

      Instead of editing one of the existing templates, it's better to copy one and edit the copy. That way, your custom template will not be overwritten when upgrading Harvest. For example, if you want to change conf/rest/9.12.0/aggr.yaml, first create a copy (e.g., conf/rest/9.12.0/custom_aggr.yaml), then add these lines to conf/rest/custom.yaml:

      objects:\n  Aggregate: custom_aggr.yaml\n

      After restarting pollers, aggr.yaml will be ignored and the new, custom_aggr.yaml subtemplate will be used instead.
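
For example, a minimal shell sketch of the copy step described above (the paths mirror the aggr.yaml example; adjust them for your ONTAP version directory):

cp conf/rest/9.12.0/aggr.yaml conf/rest/9.12.0/custom_aggr.yaml\n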

      "},{"location":"configure-templates/#how-to-extend-a-zapizapiperf-collectors-existing-object-template","title":"How to extend a Zapi/ZapiPerf collector's existing object template","text":"

      In this example, we want to extend one of the existing object templates that Harvest ships with, e.g. conf/zapi/cdot/9.8.0/lun.yaml and collect additional information as outlined below.

      Let's say you want to extend lun.yaml to:

      1. Increase client_timeout (You want to increase the default timeout of the lun ZAPI because it keeps timing out)
      2. Add additional counters, e.g. multiprotocol-type, application
      3. Add a new counter to the already collected lun metrics using the value_to_num plugin
      4. Add a new application instance_keys and labels to the collected metrics

      Let's assume the existing template is located at conf/zapi/cdot/9.8.0/lun.yaml and contains the following.

      name: Lun\nquery: lun-get-iter\nobject: lun\n\ncounters:\n  lun-info:\n    - ^node\n    - ^path\n    - ^qtree\n    - size\n    - size-used\n    - ^state\n    - ^^uuid\n    - ^volume\n    - ^vserver => svm\n\nplugins:\n  - LabelAgent:\n    # metric label zapi_value rest_value `default_value`\n    value_to_num:\n      - new_status state online online `0`\n    split:\n      - path `/` ,,,lun\n\nexport_options:\n  instance_keys:\n    - node\n    - qtree\n    - lun\n    - volume\n    - svm\n  instance_labels:\n    - state\n

      To extend the out-of-the-box lun.yaml template, create a conf/zapi/custom.yaml file if it doesn't already exist and add the lines shown below:

      objects:\n  Lun: custom_lun.yaml\n

      Create a new object template conf/zapi/cdot/9.8.0/custom_lun.yaml with the lines shown below.

      client_timeout: 5m\ncounters:\n  lun-info:\n    - ^multiprotocol-type\n    - ^application\n\nplugins:\n  - LabelAgent:\n    value_to_num:\n      - custom_status state online online `0`\n\nexport_options:\n  instance_keys:\n    - application\n

      When you restart your pollers, Harvest will take the out-of-the-box template (lun.yaml) and your new one (custom_lun.yaml) and merge them into the following:

      name: Lun\nquery: lun-get-iter\nobject: lun\ncounters:\n  lun-info:\n    - ^node\n    - ^path\n    - ^qtree\n    - size\n    - size-used\n    - ^state\n    - ^^uuid\n    - ^volume\n    - ^vserver => svm\n    - ^multiprotocol-type\n    - ^application\nplugins:\n  LabelAgent:\n    value_to_num:\n      - new_status state online online `0`\n      - custom_status state online online `0`\n    split:\n      - path `/` ,,,lun\nexport_options:\n  instance_keys:\n    - node\n    - qtree\n    - lun\n    - volume\n    - svm\n    - application\nclient_timeout: 5m\n

      To help understand the merging process and the resulting combined template, you can view the result with:

      bin/harvest doctor merge --template conf/zapi/cdot/9.8.0/lun.yaml --with conf/zapi/cdot/9.8.0/custom_lun.yaml\n
      "},{"location":"configure-templates/#replace-an-existing-object-template-for-zapizapiperf-collector","title":"Replace an existing object template for Zapi/ZapiPerf Collector","text":"

      You can only extend existing templates for Zapi/ZapiPerf Collector as explained above. If you need to replace one of the existing object templates, let us know on Discord or GitHub.

      "},{"location":"configure-templates/#harvest-versioned-templates","title":"Harvest Versioned Templates","text":"

Harvest ships with a set of versioned templates tailored for specific versions of ONTAP. At runtime, Harvest uses a BestFit heuristic to pick the most appropriate template. The BestFit heuristic compares the list of Harvest templates with the ONTAP version and selects the best match. There are versioned templates for both the ZAPI and REST collectors. Below is an example of how the BestFit algorithm works. Assume Harvest has these template versions:

      • 9.6.0
      • 9.6.1
      • 9.8.0
      • 9.9.0
      • 9.10.1

If you are monitoring a cluster at one of these versions, Harvest will select the indicated template:

      • ONTAP version 9.4.1, Harvest will select the templates for 9.6.0
      • ONTAP version 9.6.0, Harvest will select the templates for 9.6.0
      • ONTAP version 9.7.X, Harvest will select the templates for 9.6.1
      • ONTAP version 9.12, Harvest will select the templates for 9.10.1
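
On disk, these hypothetical template versions would live in version-named subdirectories; for the ZAPI collector the layout would look roughly like this (an illustrative sketch, not a listing of your installation):

conf/zapi/cdot/\n  9.6.0/\n  9.6.1/\n  9.8.0/\n  9.9.0/\n  9.10.1/\n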
      "},{"location":"configure-templates/#counters","title":"counters","text":"

      This section contains the complete or partial attribute tree of the queried API. Since the collector does not get counter metadata from the ONTAP system, two additional symbols are used for non-numeric attributes:

      • ^ used as a prefix indicates that the attribute should be stored as a label
      • ^^ indicates that the attribute is a label and an instance key (i.e., a label that uniquely identifies an instance, such as name, uuid). If a single label does not uniquely identify an instance, then multiple instance keys should be indicated.

      Additionally, the symbol => can be used to set a custom display name for both instance labels and numeric counters. Example:

      name: Spare\nquery: aggr-spare-get-iter\nobject: spare\ncollect_only_labels: true\ncounters:\n  aggr-spare-disk-info:\n    - ^^disk                                # creates label aggr-disk\n    - ^disk-type                            # creates label aggr-disk-type\n    - ^is-disk-zeroed   => is_disk_zeroed   # creates label is_disk_zeroed\n    - ^^original-owner  => original_owner   # creates label original_owner\nexport_options:\n  instance_keys:\n    - disk\n    - original_owner\n  instance_labels:\n    - disk_type\n    - is_disk_zeroed\n

Harvest does its best to determine a unique display name for each template's label and metric. Instead of relying on this heuristic, it is better to be explicit in your templates and define a display name using the => mapping. For example, instead of this:

      aggr-spare-disk-info:\n    - ^^disk\n    - ^disk-type\n

      do this:

      aggr-spare-disk-info:\n    - ^^disk      => disk\n    - ^disk-type  => disk_type\n

      See also #585

      "},{"location":"configure-unix/","title":"Unix","text":"

This collector polls resource usage by Harvest pollers on the local system. The collector might be extended in the future to monitor any local or remote process.

      "},{"location":"configure-unix/#target-system","title":"Target System","text":"

      The machine where Harvest is running (\"localhost\").

      "},{"location":"configure-unix/#requirements","title":"Requirements","text":"

The collector requires an OS where the proc-filesystem is available. If you are a developer, you are welcome to add support for other platforms. Currently, supported platforms include most Unix/Unix-like systems:

      • Android / Termux
      • DragonFly BSD
      • FreeBSD
      • IBM AIX
      • Linux
      • NetBSD
      • Plan9
      • Solaris

      (On FreeBSD and NetBSD the proc-filesystem needs to be manually mounted).

      "},{"location":"configure-unix/#parameters","title":"Parameters","text":"parameter type description default mount_point string, optional path to the proc filesystem `/proc"},{"location":"configure-unix/#metrics","title":"Metrics","text":"

The Collector follows the Linux proc(5) manual to parse a static set of metrics. Unless otherwise stated, each metric has a scalar value:

metric type unit description start_time counter, float64 seconds process uptime cpu_percent gauge, float64 percent CPU used since last poll memory_percent gauge, float64 percent Memory used (RSS) since last poll cpu histogram, float64 seconds CPU used since last poll (system, user, iowait) memory histogram, uint64 kB Memory used since last poll (rss, vms, swap, etc) io histogram, uint64 bytecount IOs performed by process: rchar, wchar, read_bytes, write_bytes - read/write IOs; syscr, syscw - syscalls for IO operations net histogram, uint64 count/byte Different IO operations over network devices ctx histogram, uint64 count Number of context switches (voluntary, involuntary) threads counter, uint64 count Number of threads fds counter, uint64 count Number of file descriptors

      Additionally, the collector provides the following instance labels:

      label description poller name of the poller pid PID of the poller"},{"location":"configure-unix/#issues","title":"Issues","text":"
• The collector will fail on WSL because some non-critical files in the proc-filesystem are not present.
      "},{"location":"configure-zapi/","title":"ZAPI","text":"

      What about REST?

      ZAPI will reach end of availability in ONTAP 9.13.1 released Q2 2023. Don't worry, Harvest has you covered. Switch to Harvest's REST collectors and collect identical metrics. See REST Strategy for more details.

      "},{"location":"configure-zapi/#zapi-collector","title":"Zapi Collector","text":"

      The Zapi collectors use the ZAPI protocol to collect data from ONTAP systems. The collector submits data as received from the target system, and does not perform any calculations or post-processing. Since the attributes of most APIs have an irregular tree structure, sometimes a plugin will be required to collect all metrics from an API.

The ZapiPerf collector is an extension of this collector; therefore, they share many parameters and configuration settings.

      "},{"location":"configure-zapi/#target-system","title":"Target System","text":"

The target system can be any cDot or 7Mode ONTAP system. Any version is supported; however, the default configuration files may not completely match older systems.

      "},{"location":"configure-zapi/#requirements","title":"Requirements","text":"

No SDK or other requirements. It is recommended to create a read-only user for Harvest on the ONTAP system (see prepare monitored clusters for details).

      "},{"location":"configure-zapi/#metrics","title":"Metrics","text":"

      The collector collects a dynamic set of metrics. Since most ZAPIs have a tree structure, the collector converts that structure into a flat metric representation. No post-processing or calculation is performed on the collected data itself.

      As an example, the aggr-get-iter ZAPI provides the following partial attribute tree:

      aggr-attributes:\n  - aggr-raid-attributes:\n      - disk-count\n  - aggr-snapshot-attributes:\n      - files-total\n

The Zapi collector will convert this tree into two \"flat\" metrics: aggr_raid_disk_count and aggr_snapshot_files_total. (The algorithm to generate a name for the metrics will attempt to keep it as simple as possible, but sometimes it's useful to manually set a short display name; see counters for more details.)

      "},{"location":"configure-zapi/#parameters","title":"Parameters","text":"

      The parameters of the collector are distributed across three files:

      • Harvest configuration file (default: harvest.yml)
      • ZAPI configuration file (default: conf/zapi/default.yaml)
      • Each object has its own configuration file (located in conf/zapi/$version/)

Except for addr and datacenter, all other parameters of the ZAPI collector can be defined in any of these three files. Parameters defined in the lower-level file override parameters in the higher-level ones. This allows you to configure each object individually, or use the same parameters for all objects.
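
For example, a minimal hedged harvest.yml sketch that overrides one collector default (client_timeout) for a single poller; the poller name, datacenter, and address are illustrative:

Pollers:\n  cluster-01:\n    datacenter: dc-01\n    addr: 10.0.1.1\n    collectors:\n      - Zapi\n    client_timeout: 2m\n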

The full set of parameters is described below.

      "},{"location":"configure-zapi/#collector-configuration-file","title":"Collector configuration file","text":"

      The parameters are similar to those of the ZapiPerf collector. Parameters different from ZapiPerf:

      parameter type description default jitter duration (Go-syntax), optional Each Harvest collector runs independently, which means that at startup, each collector may send its ZAPI queries at nearly the same time. To spread out the collector startup times over a broader period, you can use jitter to randomly distribute collector startup across a specified duration. For example, a jitter of 1m starts each collector after a random delay between 0 and 60 seconds. For more details, refer to this discussion. schedule required same as for ZapiPerf, but only two elements: instance and data (collector does not run a counter poll) no_max_records bool, optional don't add max-records to the ZAPI request collect_only_labels bool, optional don't look for numeric metrics, only submit labels (suppresses the ErrNoMetrics error) only_cluster_instance bool, optional don't look for instance keys and assume only instance is the cluster itself"},{"location":"configure-zapi/#object-configuration-file","title":"Object configuration file","text":"

The Zapi collector does not have the instance_key and override parameters. The optional parameter metric_type allows you to override the default metric type (uint64). The value of this parameter should be one of the metric types supported by the matrix data-structure.

      The Object configuration file (\"subtemplate\") should contain the following parameters:

parameter type description default name string, required display name of the collector that will collect this object query string, required ZAPI query used to issue a ZAPI request object string, required short name of the object counters string list of counters to collect (see notes below) plugins list plugins and their parameters to run on the collected data export_options list parameters to pass to exporters (see notes below)"},{"location":"configure-zapi/#counters","title":"Counters","text":"

      This section defines the list of counters that will be collected. These counters can be labels, numeric metrics or histograms. The exact property of each counter is fetched from ONTAP and updated periodically.

      Some counters require a \"base-counter\" for post-processing. If the base-counter is missing, ZapiPerf will still run, but the missing data won't be exported.

The display name of a counter can be changed with => (e.g., nfsv3_ops => ops). There's one conversion Harvest does for you by default: the instance_name counter will be renamed to the value of object.

      Counters that are stored as labels will only be exported if they are included in the export_options section.

      "},{"location":"configure-zapi/#export_options","title":"Export_options","text":"

      Parameters in this section tell the exporters how to handle the collected data.

      There are two different kinds of time-series that Harvest publishes: metrics and instance labels.

      • Metrics are numeric data with associated labels (key-value pairs). E.g. volume_read_ops_total{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\"} 123. The volume_read_ops_total metric is exporting three labels: cluster, node, and volume and the metric value is 123.
• Instance labels are named after their associated config object (e.g., volume_labels, qtree_labels, etc.). There will be one instance label for each object instance, and each instance label will contain a set of associated labels (key-value pairs) that are defined in the template's instance_labels parameter. E.g. volume_labels{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\", svm=\"svm1\"} 1. The volume_labels instance label is exporting four labels: cluster, node, volume, and svm. Instance labels always export a metric value of 1.

      The export_options section allows you to define how to export these time-series.

• instance_keys (list): display names of labels to export to both metric and instance labels. For example, if you list the svm counter under instance_keys, that key-value will be included in all time-series metrics and all instance-labels.
      • instance_labels (list): display names of labels to export with the corresponding instance label config object. For example, if you want the volume counter to be exported with the volume_labels instance label, you would list volume in the instance_labels section.
      • include_all_labels (bool): exports all labels for all time-series metrics. If there are no metrics defined in the template, this option will do nothing. This option also overrides the previous two parameters. See also collect_only_labels.
      "},{"location":"configure-zapi/#zapiperf-collector","title":"ZapiPerf Collector","text":""},{"location":"configure-zapi/#zapiperf","title":"ZapiPerf","text":"

      ZapiPerf collects performance metrics from ONTAP systems using the ZAPI protocol. The collector is designed to be easily extendable to collect new objects or to collect additional counters from already configured objects.

      This collector is an extension of the Zapi collector. The major difference between them is that ZapiPerf collects only the performance (perf) APIs. Additionally, ZapiPerf always calculates final values from the deltas of two subsequent polls.

      "},{"location":"configure-zapi/#metrics_1","title":"Metrics","text":"

The collector collects a dynamic set of metrics. The metric values are calculated from two consecutive polls (therefore, no metrics are emitted after the first poll). The calculation algorithm depends on the property and base-counter attributes of each metric; the following properties are supported:

property formula description raw x = x_i no post-processing, value x is submitted as it is delta x = x_i - x_(i-1) delta of two poll values, x_i and x_(i-1) rate x = (x_i - x_(i-1)) / (t_i - t_(i-1)) delta divided by the interval of the two polls in seconds average x = (x_i - x_(i-1)) / (y_i - y_(i-1)) delta divided by the delta of the base counter y percent x = 100 * (x_i - x_(i-1)) / (y_i - y_(i-1)) average multiplied by 100"},{"location":"configure-zapi/#parameters_1","title":"Parameters","text":"

      The parameters of the collector are distributed across three files:

      • Harvest configuration file (default: harvest.yml)
      • ZapiPerf configuration file (default: conf/zapiperf/default.yaml)
      • Each object has its own configuration file (located in conf/zapiperf/cdot/ and conf/zapiperf/7mode/ for cDot and 7Mode systems respectively)

Except for addr, datacenter and auth_style, all other parameters of the ZapiPerf collector can be defined in any of these three files. Parameters defined in the lower-level file override parameters in the higher-level file. This allows the user to configure each object individually, or use the same parameters for all objects.

The full set of parameters is described below.

      "},{"location":"configure-zapi/#zapiperf-configuration-file","title":"ZapiPerf configuration file","text":"

      This configuration file (the \"template\") contains a list of objects that should be collected and the filenames of their configuration (explained in the next section).

      Additionally, this file contains the parameters that are applied as defaults to all objects. (As mentioned before, any of these parameters can be defined in the Harvest or object configuration files as well).

parameter type description default use_insecure_tls bool, optional skip verifying TLS certificate of the target system false client_timeout duration (Go-syntax) how long to wait for server responses 30s batch_size int, optional max instances per API request 500 latency_io_reqd int, optional threshold of IOPs for calculating latency metrics (latencies based on very few IOPs are unreliable) 10 jitter duration (Go-syntax), optional Each Harvest collector runs independently, which means that at startup, each collector may send its ZAPI queries at nearly the same time. To spread out the collector startup times over a broader period, you can use jitter to randomly distribute collector startup across a specified duration. For example, a jitter of 1m starts each collector after a random delay between 0 and 60 seconds. For more details, refer to this discussion. schedule list, required the poll frequencies of the collector/object, should include exactly these three elements in the exact same order: - counter duration (Go-syntax) poll frequency of updating the counter metadata cache (example value: 20m) - instance duration (Go-syntax) poll frequency of updating the instance cache (example value: 10m) - data duration (Go-syntax) poll frequency of updating the data cache (example value: 1m) Note: Harvest allows defining poll intervals at the sub-second level (e.g., 1ms); however, keep in mind the following:
• The API response of an ONTAP system can take several seconds, so the collector is likely to enter a failed state if the poll interval is less than client_timeout.
• Small poll intervals will create a significant workload on the ONTAP system, as many counters are aggregated on-demand.
• Some metric values become less significant if they are calculated for very short intervals (e.g., latencies).

      The template should define objects in the objects section. Example:

      objects:\n  SystemNode: system_node.yaml\n  HostAdapter: hostadapter.yaml\n

Note that for each object we only define the filename of the object configuration file. The object configuration files are located in subdirectories matching the ONTAP version that was used to create these files. It is possible to have multiple version-subdirectories for multiple ONTAP versions. At runtime, the collector will select the object configuration file that most closely matches the version of the target ONTAP system. (A mismatch is tolerated since ZapiPerf will fetch and validate counter metadata from the system.)

      "},{"location":"configure-zapi/#object-configuration-file_1","title":"Object configuration file","text":"

      The Object configuration file (\"subtemplate\") should contain the following parameters:

      parameter type description default name string display name of the collector that will collect this object object string short name of the object query string raw object name used to issue a ZAPI request counters list list of counters to collect (see notes below) instance_key string label to use as instance key (either name or uuid) override list of key-value pairs override counter properties that we get from ONTAP (allows circumventing ZAPI bugs) plugins list plugins and their parameters to run on the collected data export_options list parameters to pass to exporters (see notes below)"},{"location":"configure-zapi/#counters_1","title":"counters","text":"

      This section defines the list of counters that will be collected. These counters can be labels, numeric metrics or histograms. The exact property of each counter is fetched from ONTAP and updated periodically.

      Some counters require a \"base-counter\" for post-processing. If the base-counter is missing, ZapiPerf will still run, but the missing data won't be exported.

The display name of a counter can be changed with => (e.g., nfsv3_ops => ops). There's one conversion Harvest does for you by default: the instance_name counter will be renamed to the value of object.

      Counters that are stored as labels will only be exported if they are included in the export_options section.
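
As an illustration, a hedged counters sketch for a ZapiPerf subtemplate that renames a counter's display name; nfsv3_ops is the counter from the example above, and the instance_name entry is listed only for illustration:

counters:\n  - instance_name\n  - nfsv3_ops => ops\n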

      "},{"location":"configure-zapi/#export_options_1","title":"export_options","text":"

      Parameters in this section tell the exporters how to handle the collected data.

      There are two different kinds of time-series that Harvest publishes: metrics and instance labels.

      • Metrics are numeric data with associated labels (key-value pairs). E.g. volume_read_ops_total{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\"} 123. The volume_read_ops_total metric is exporting three labels: cluster, node, and volume and the metric value is 123.
• Instance labels are named after their associated config object (e.g., volume_labels, nic_labels, etc.). There will be one instance label for each object instance, and each instance label will contain a set of associated labels (key-value pairs) that are defined in the template's instance_labels parameter. E.g. volume_labels{cluster=\"cluster1\", node=\"node1\", volume=\"vol1\", svm=\"svm1\"} 1. The volume_labels instance label is exporting four labels: cluster, node, volume, and svm. Instance labels always export a metric value of 1.
      Instance labels are rarely used with ZapiPerf templates

      They can be useful for exporting labels that are not associated with a metric value.

      The export_options section allows you to define how to export these time-series.

• instance_keys (list): display names of labels to export to both metric and instance labels. For example, if you list the svm counter under instance_keys, that key-value will be included in all time-series metrics and all instance-labels.
      • instance_labels (list): display names of labels to export with the corresponding instance label config object. For example, if you want the volume counter to be exported with the volume_labels instance label, you would list volume in the instance_labels section.
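
For example, a hedged export_options sketch for a volume-style object; with it, the volume and svm labels would be added to every exported time-series, and state would be exported on the object's instance label (label names and values are illustrative):

export_options:\n  instance_keys:\n    - volume\n    - svm\n  instance_labels:\n    - state\n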
      "},{"location":"configure-zapi/#filter","title":"Filter","text":"

      This guide provides instructions on how to use the filter feature in ZapiPerf. Filtering is useful when you need to query a subset of instances. For example, suppose you have a small number of high-value volumes from which you want Harvest to collect performance metrics every five seconds. Collecting data from all volumes at this frequency would be too resource-intensive. Therefore, filtering allows you to create/modify a template that includes only the high-value volumes.

      "},{"location":"configure-zapi/#objects-excluding-workload","title":"Objects (Excluding Workload)","text":"

      In ZapiPerf templates, you can set up filters under counters. Wildcards like * are useful if you don't want to specify all instances. Please note, ONTAP Zapi filtering does not support regular expressions, only wildcard matching with *.

      For instance, to filter volume performance instances by instance name where the name is NS_svm_nvme or contains Test, use the following configuration in ZapiPerf volume.yaml under counters:

      counters:\n  ...\n  - filter:\n     - instance_name=NS_svm_nvme|instance_name=*Test*\n

      You can define multiple values within the filter array. These will be interpreted as AND conditions by ONTAP. Alternatively, you can specify a complete expression within a single array element, as described in the ONTAP filtering section below.
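
For example, a hedged sketch that places a complete expression (the one discussed in the ONTAP filtering details below) in a single array element:

counters:\n  ...\n  - filter:\n     - instance_name=volA,vserver_name=vs1|vserver_name=vs2\n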

      ONTAP Filtering Details

To better understand ONTAP's filtering mechanism: ONTAP allows the use of filter-data for the perf-object-instance-list-info-iter ZAPI.

The filter-data is a string of filter criteria, adhering to the format counter_name=counter_value. You can define multiple pairs, separated by either a comma (\",\") or a pipe (\"|\").

      Here's the interpretation:

      • A comma (\",\") signifies an AND operation.
      • A pipe (\"|\") signifies an OR operation.
      • The precedence order is AND first, followed by OR.

      For instance, the filter string instance_name=volA,vserver_name=vs1|vserver_name=vs2 translates to (instance_name=volA && vserver_name=vs1) || (vserver_name=vs2).

      This filter will return instances on Vserver vs1 named volA, and all instances on Vserver vs2.

      "},{"location":"configure-zapi/#workload-templates","title":"Workload Templates","text":"

      Performance workload templates require a different syntax because instances are retrieved from the qos-workload-get-iter ZAPI instead of perf-object-instance-list-info-iter.

      The qos-workload-get-iter ZAPI supports filtering on the following fields:

      • workload-uuid
      • workload-name
      • workload-class
      • wid
      • category
      • policy-group
      • vserver
      • volume
      • lun
      • file
      • qtree
      • read-ahead
      • max-throughput
      • min-throughput
      • is-adaptive
      • is-constituent

      You can include these fields under the filter parameter. For example, to filter Workload performance instances by workload-name where the name contains NS or Test and vserver is vs1, use the following configuration in ZapiPerf workload.yaml under counters:

      counters:\n  ...\n  - filter:\n      - workload-name: \"*NS*|*Test*\"\n      - vserver: vs1\n
      "},{"location":"configure-zapi/#partial-aggregation","title":"Partial Aggregation","text":"

      There are instances when ONTAP may report partial aggregate results for certain objects (for example, during a node outage). In such cases, the ZapiPerf Collector will skip the reporting of performance counters for the affected objects.

      To determine whether partial aggregation affects an object, check the numPartials entry in the Harvest logs. If numPartials is greater than zero, it indicates that partial aggregations have occurred for that object. e.g. Collected Poller=aff-251 collector=ZapiPerf:NFSv4 instances=56 numPartials=15
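
For example, a hedged way to look for these entries from the command line, assuming poller logs are written under /var/log/harvest/:

grep -R numPartials /var/log/harvest/\n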

      "},{"location":"dashboards/","title":"Dashboards","text":"

      Harvest can be used to import dashboards to Grafana.

      The bin/harvest grafana utility requires the address (hostname or IP), port of the Grafana server, and a Grafana API token. The port can be omitted if Grafana is configured to redirect the URL. Use the -d flag to point to the directory that contains the dashboards.

      "},{"location":"dashboards/#grafana-api-token","title":"Grafana API token","text":"

      The utility tool asks for an API token which can be generated from the Grafana web-gui.

Click on Configuration in the left menu bar (1), click on API Keys (2) and click on the New API Key button. Choose a Key name (3), choose Editor for the role (4) and click on Add (5). Copy the generated key and paste it in your terminal or add the token to the Tools section of your configuration file (see below).

      For example, let's say your Grafana server is on http://my.grafana.server:3000 and you want to import the Prometheus-based dashboards from the grafana directory. You would run this:

      bin/harvest grafana import --addr my.grafana.server:3000\n

      Similarly, to export:

      bin/harvest grafana export --addr my.grafana.server:3000 --directory /path/to/export/directory --serverfolder grafanaFolderName\n

      By default, the dashboards are connected to a datasource named prometheus (case-sensitive). This is a datasource of the Prometheus type, defined in Grafana. However, despite the type, the datasource can have any name. If you have a Prometheus type datasource with a name different from prometheus, you can specify this name using the --datasource flag during import/export like this:

      bin/harvest grafana import --addr my.grafana.server:3000 --datasource custom_datasource_name\n
      "},{"location":"dashboards/#cli","title":"CLI","text":"

The bin/harvest grafana tool includes CLI help when passed the --help command-line flag, like so:

      bin/harvest grafana import --help\n

      The labels argument requires more explanation.

      "},{"location":"dashboards/#labels","title":"Labels","text":"

      The grafana import --labels argument goes hand-in-hand with a poller's Labels section described here. Labels are used to add additional key-value pairs to a poller's metrics.

      When you run bin/harvest grafana import, you may optionally pass a set of labels like so:

      bin/harvest grafana import --labels org --labels dept

This will cause Harvest to do the following for each dashboard:

1. Parse each dashboard and add a new variable for each label passed on the command line
2. Modify each dashboard variable to use the new label variable(s) in a chained query.

      Here's an example:

      bin/harvest grafana import --labels \"org,dept\"\n

This will add the Org and Dept variables and modify the existing dashboard variables to chain from them.


      "},{"location":"dashboards/#creating-a-custom-grafana-dashboard-with-harvest-metrics-stored-in-prometheus","title":"Creating a Custom Grafana Dashboard with Harvest Metrics Stored in Prometheus","text":"

      This guide assumes that you have already installed and configured Harvest, Prometheus, and Grafana. Instead of creating a new Grafana dashboard from scratch, you might find it more efficient to clone and modify an existing one. Alternatively, you can copy/paste an existing dashboard's panel from an existing dashboard into your new one.

      Harvest collects a wide range of metrics from ONTAP and StorageGRID clusters, which are documented here. These metrics can be used to create dashboards in Grafana.

      "},{"location":"dashboards/#step-1-confirm-that-prometheus-is-receiving-metrics-from-harvest","title":"Step 1: Confirm that Prometheus is Receiving Metrics from Harvest","text":"

      Before creating a dashboard, make sure the relevant metric is present via a PromQL query in the Prometheus UI. If the metric is not present, navigate to Status -> Targets in the Prometheus UI to check the state and any potential errors of the scrape target.
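
For example, a hedged command-line check; the Prometheus address, port, and metric name are illustrative:

curl -sG 'http://localhost:9090/api/v1/query' --data-urlencode 'query=volume_read_ops'\n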

      "},{"location":"dashboards/#step-2-add-prometheus-as-a-data-source-in-grafana","title":"Step 2: Add Prometheus as a Data Source in Grafana","text":"

      If you haven't already, add Prometheus as a data source in Grafana:

      1. In the Grafana UI, go to Configuration > Data Sources.
      2. Click Add data source.
      3. Select Prometheus.
      4. Enter the URL of your Prometheus server, and click Save & Test.
      "},{"location":"dashboards/#step-3-create-a-new-dashboard","title":"Step 3: Create a New Dashboard","text":"

      Now you're ready to create a new dashboard:

      1. In the Grafana UI, click the + icon on the left menu and select Dashboard.
      2. Click Add new panel.
      "},{"location":"dashboards/#step-4-add-queries-to-visualize-harvest-metrics","title":"Step 4: Add Queries to Visualize Harvest Metrics","text":"

      In the new panel, you can add queries to visualize the Harvest metrics:

      1. In the query editor, select Prometheus as the data source.
2. Write your query to visualize the Harvest counters. Prometheus uses a language called PromQL for querying data. The exact query will depend on the specific Harvest counters you want to visualize (see the example query after this list). You can refer to the Harvest metrics documentation for details on the available metrics.
      3. Adjust the visualization settings as needed, and click Apply to add the panel to the dashboard.
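
For instance, a hedged example query that charts the ten highest-latency volumes; the metric name is illustrative and depends on the templates you collect:

topk(10, volume_avg_latency)\n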
      "},{"location":"dashboards/#step-5-save-the-dashboard","title":"Step 5: Save the Dashboard","text":"

      Once you're satisfied with the panels and layout of your dashboard, don't forget to save it. You can then share it with others, or keep it for your own use.

      Remember, the specifics of these steps can vary depending on your exact setup and requirements. This guide provides a general approach, but you may need to adjust it for your situation.

      "},{"location":"influxdb-exporter/","title":"InfluxDB Exporter","text":"InfluxDB Install

      The information below describes how to setup Harvest's InfluxDB exporter. If you need help installing or setting up InfluxDB, check out their documentation.

      "},{"location":"influxdb-exporter/#overview","title":"Overview","text":"

The InfluxDB Exporter will format metrics into InfluxDB's line protocol and write them into a bucket. The Exporter is compatible with InfluxDB v2.0. For an explanation of bucket, org, and precision, see the InfluxDB API documentation.

      If you are monitoring both CDOT and 7mode clusters, it is strongly recommended to use two different buckets.

      "},{"location":"influxdb-exporter/#parameters","title":"Parameters","text":"

An overview of all parameters is provided below. Only one of url or addr should be provided, and at least one of them is required. If addr is specified, it should be a valid TCP address or hostname of the InfluxDB server and should not include the scheme. When using addr, the bucket, org, and token key/values are required.

      addr only works with HTTP. If you need to use HTTPS, you should use url instead.

If url is specified, you must add all arguments to the url. Harvest will do no additional processing and use exactly what you specify (e.g., url: https://influxdb.example.com:8086/write?db=netapp&u=user&p=pass&precision=2). When using url, the bucket, org, port, and precision fields will be ignored.

      parameter type description default url string URL of the database, format: SCHEME://HOST[:PORT] addr string address of the database, format: HOST (HTTP only) port int, optional port of the database 8086 bucket string, required with addr InfluxDB bucket to write org string, required with addr InfluxDB organization name precision string, required with addr Preferred timestamp precision in seconds 2 client_timeout int, optional client timeout in seconds 5 token string token for authentication"},{"location":"influxdb-exporter/#example","title":"Example","text":"

snippet from harvest.yml using addr (supports HTTP only):

      Exporters:\n  my_influx:\n    exporter: InfluxDB\n    addr: localhost\n    bucket: harvest\n    org: harvest\n    token: ZTTrt%24@#WNFM2VZTTNNT25wZWUdtUmhBZEdVUmd3dl@# \n

snippet from harvest.yml using url (supports both HTTP and HTTPS):

      Exporters:\n  influx2:\n    exporter: InfluxDB\n    url: https://localhost:8086/api/v2/write?org=harvest&bucket=harvest&precision=s\n    token: my-token== \n

Notice: InfluxDB stores a token in ~/.influxdbv2/configs, but you can also retrieve it from the UI (usually served on localhost:8086): click on \"Data\" on the left task bar, then on \"Tokens\".

      "},{"location":"license/","title":"License","text":"

      Harvest's License

      "},{"location":"manage-harvest/","title":"Manage Harvest Pollers","text":"

      Coming Soon

      "},{"location":"monitor-harvest/","title":"Monitor Harvest","text":""},{"location":"monitor-harvest/#harvest-metadata","title":"Harvest Metadata","text":"

      Harvest publishes metadata metrics about the key components of Harvest. Many of these metrics are used in the Harvest Metadata dashboard.

      If you want to understand more about these metrics, read on!

      Metrics are published for:

      • collectors
      • pollers
      • clusters being monitored
      • exporters

      Here's a high-level summary of the metadata metrics Harvest publishes with details below.

      Metric Description Units metadata_collector_api_time amount of time to collect data from monitored cluster object microseconds metadata_collector_instances number of objects collected from monitored cluster scalar metadata_collector_metrics number of counters collected from monitored cluster scalar metadata_collector_parse_time amount of time to parse XML, JSON, etc. for cluster object microseconds metadata_collector_plugin_time amount of time for all plugins to post-process metrics microseconds metadata_collector_poll_time amount of time it took for the poll to finish microseconds metadata_collector_task_time amount of time it took for each collector's subtasks to complete microseconds metadata_component_count number of metrics collected for each object scalar metadata_component_status status of the collector - 0 means running, 1 means standby, 2 means failed enum metadata_exporter_count number of metrics and labels exported scalar metadata_exporter_time amount of time it took to render, export, and serve exported data microseconds metadata_target_goroutines number of goroutines that exist within the poller scalar metadata_target_status status of the system being monitored. 0 means reachable, 1 means unreachable enum metadata_collector_calc_time amount of time it took to compute metrics between two successive polls, specifically using properties like raw, delta, rate, average, and percent. This metric is available for ZapiPerf/RestPerf collectors. microseconds metadata_collector_skips number of metrics that were not calculated between two successive polls. This metric is available for ZapiPerf/RestPerf collectors. scalar"},{"location":"monitor-harvest/#collector-metadata","title":"Collector Metadata","text":"

      A poller publishes the metadata metrics for each collector and exporter associated with it.

      Let's say we start a poller with the Zapi collector and the out-of-the-box default.yaml exporting metrics to Prometheus. That means you will be monitoring 22 different objects (uncommented lines in default.yaml as of 23.02).

      When we start this poller, we expect it to export 23 metadata_component_status metrics. One for each of the 22 objects, plus one for the Prometheus exporter.

      The following curl confirms there are 23 metadata_component_status metrics reported.

      curl -s http://localhost:12990/metrics | grep -v \"#\" | grep metadata_component_status | wc -l\n      23\n

These metrics also tell us which collectors are in a standby or failed state. For example, filtering on components not in the running state shows the following since this cluster doesn't have any ClusterPeers, SecurityAuditDestinations, or SnapMirrors. The reason is listed as no instances and the metric value is 1, which means standby.

      curl -s http://localhost:12990/metrics | grep -v \"#\" | grep metadata_component_status | grep -Evo \"running\"\nmetadata_component_status{name=\"Zapi\", reason=\"no instances\",target=\"ClusterPeer\",type=\"collector\",version=\"23.04.1417\"} 1\nmetadata_component_status{name=\"Zapi\", reason=\"no instances\",target=\"SecurityAuditDestination\",type=\"collector\",version=\"23.04.1417\"} 1\nmetadata_component_status{name=\"Zapi\", reason=\"no instances\",target=\"SnapMirror\",type=\"collector\",version=\"23.04.1417\"} 1\n

      The log files for the poller show a similar story. The poller starts with 22 collectors, but drops to 19 after three of the collectors go to standby because there are no instances to collect.

      2023-04-17T13:14:18-04:00 INF ./poller.go:539 > updated status, up collectors: 22 (of 22), up exporters: 1 (of 1) Poller=u2\n2023-04-17T13:14:18-04:00 INF collector/collector.go:342 > no instances, entering standby Poller=u2 collector=Zapi:SecurityAuditDestination task=data\n2023-04-17T13:14:18-04:00 INF collector/collector.go:342 > no instances, entering standby Poller=u2 collector=Zapi:ClusterPeer task=data\n2023-04-17T13:14:18-04:00 INF collector/collector.go:342 > no instances, entering standby Poller=u2 collector=Zapi:SnapMirror task=data\n2023-04-17T13:15:18-04:00 INF ./poller.go:539 > updated status, up collectors: 19 (of 22), up exporters: 1 (of 1) Poller=u2\n
      "},{"location":"ontap-metrics/","title":"ONTAP Metrics","text":"

      This document describes how Harvest metrics relate to their relevant ONTAP ZAPI and REST mappings, including:

      • Details about which Harvest metrics each dashboard uses. These can be generated on demand by running bin/harvest grafana metrics. See #1577 for details.

      • More information about ONTAP REST performance counters can be found here.

      Creation Date : 2024-Sep-17\nONTAP Version: 9.15.1\n
      "},{"location":"ontap-metrics/#understanding-the-structure","title":"Understanding the structure","text":"

      Below is an annotated example of how to interpret the structure of each of the metrics.

disk_io_queued (Name of the metric exported by Harvest)

Number of I/Os queued to the disk but not yet issued (Description of the ONTAP metric)

• API: will be one of REST or ZAPI depending on which collector is used to collect the metric
• Endpoint: name of the REST or ZAPI API used to collect this metric
• Metric: name of the ONTAP metric
• Template: path of the template that collects the metric

      Performance related metrics also include:

• Unit: the unit of the metric
• Type: describes how to calculate a cooked metric from two consecutive ONTAP raw metrics
• Base: some counters require a base counter for post-processing. When required, this property lists the base counter
API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queued Unit: none Type: average Base: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queued Unit: none Type: average Base: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#metrics","title":"Metrics","text":""},{"location":"ontap-metrics/#aggr_disk_busy","title":"aggr_disk_busy","text":"

      The utilization percent of the disk. aggr_disk_busy is disk_busy aggregated by aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent disk_busy_percent Unit: percent Type: percent Base: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_busy Unit: percent Type: percent Base: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_capacity","title":"aggr_disk_capacity","text":"

      Disk capacity in MB. aggr_disk_capacity is disk_capacity aggregated by aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent capacity Unit: mb Type: raw Base: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_capacity Unit: mb Type: raw Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_cp_read_chain","title":"aggr_disk_cp_read_chain","text":"

      Average number of blocks transferred in each consistency point read operation during a CP. aggr_disk_cp_read_chain is disk_cp_read_chain aggregated by aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_chain Unit: none Type: average Base: cp_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_chain Unit: none Type: average Base: cp_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_cp_read_latency","title":"aggr_disk_cp_read_latency","text":"

      Average latency per block in microseconds for consistency point read operations. aggr_disk_cp_read_latency is disk_cp_read_latency aggregated by aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_latency Unit: microsec Type: average Base: cp_read_blocks conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_latency Unit: microsec Type: average Base: cp_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_cp_reads","title":"aggr_disk_cp_reads","text":"

      Number of disk read operations initiated each second for consistency point processing. aggr_disk_cp_reads is disk_cp_reads aggregated by aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_count Unit: per_sec Type: rate Base: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_reads Unit: per_sec Type: rate Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_io_pending","title":"aggr_disk_io_pending","text":"

      Average number of I/Os issued to the disk for which we have not yet received the response. aggr_disk_io_pending is disk_io_pending aggregated by aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_pending Unit: none Type: average Base: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_pending Unit: none Type: average Base: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_io_queued","title":"aggr_disk_io_queued","text":"

      Number of I/Os queued to the disk but not yet issued. aggr_disk_io_queued is disk_io_queued aggregated by aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queued Unit: none Type: average Base: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queued Unit: none Type: average Base: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_busy","title":"aggr_disk_max_busy","text":"

      The utilization percent of the disk. aggr_disk_max_busy is the maximum of disk_busy for label aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent disk_busy_percent Unit: percent Type: percent Base: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_busy Unit: percent Type: percent Base: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_capacity","title":"aggr_disk_max_capacity","text":"

      Disk capacity in MB. aggr_disk_max_capacity is the maximum of disk_capacity for label aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent capacity Unit: mb Type: raw Base: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_capacity Unit: mb Type: raw Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_cp_read_chain","title":"aggr_disk_max_cp_read_chain","text":"

      Average number of blocks transferred in each consistency point read operation during a CP. aggr_disk_max_cp_read_chain is the maximum of disk_cp_read_chain for label aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_chain Unit: none Type: average Base: cp_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_chain Unit: none Type: average Base: cp_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_cp_read_latency","title":"aggr_disk_max_cp_read_latency","text":"

      Average latency per block in microseconds for consistency point read operations. aggr_disk_max_cp_read_latency is the maximum of disk_cp_read_latency for label aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_latency Unit: microsec Type: average Base: cp_read_blocks conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_latency Unit: microsec Type: average Base: cp_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_cp_reads","title":"aggr_disk_max_cp_reads","text":"

      Number of disk read operations initiated each second for consistency point processing. aggr_disk_max_cp_reads is the maximum of disk_cp_reads for label aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_count Unit: per_sec Type: rate Base: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_reads Unit: per_sec Type: rate Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_io_pending","title":"aggr_disk_max_io_pending","text":"

      Average number of I/Os issued to the disk for which we have not yet received the response. aggr_disk_max_io_pending is the maximum of disk_io_pending for label aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_pending Unit: none Type: average Base: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_pending Unit: none Type: average Base: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_io_queued","title":"aggr_disk_max_io_queued","text":"

      Number of I/Os queued to the disk but not yet issued. aggr_disk_max_io_queued is the maximum of disk_io_queued for label aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queued Unit: none Type: average Base: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queued Unit: none Type: average Base: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_total_data","title":"aggr_disk_max_total_data","text":"

      Total throughput for user operations per second. aggr_disk_max_total_data is the maximum of disk_total_data for label aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_data Unit: b_per_sec Type: rate Base: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_data Unit: b_per_sec Type: rate Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_total_transfers","title":"aggr_disk_max_total_transfers","text":"

      Total number of disk operations involving data transfer initiated per second. aggr_disk_max_total_transfers is the maximum of disk_total_transfers for label aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_transfer_count Unit: per_sec Type: rate Base: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_transfers Unit: per_sec Type: rate Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_read_blocks","title":"aggr_disk_max_user_read_blocks","text":"

      Number of blocks transferred for user read operations per second. aggr_disk_max_user_read_blocks is the maximum of disk_user_read_blocks for label aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_block_count Unit: per_sec Type: rate Base: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_blocks Unit: per_sec Type: rate Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_read_chain","title":"aggr_disk_max_user_read_chain","text":"

      Average number of blocks transferred in each user read operation. aggr_disk_max_user_read_chain is the maximum of disk_user_read_chain for label aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_chain Unit: none Type: average Base: user_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_chain Unit: none Type: average Base: user_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_read_latency","title":"aggr_disk_max_user_read_latency","text":"

      Average latency per block in microseconds for user read operations. aggr_disk_max_user_read_latency is the maximum of disk_user_read_latency for label aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_latency Unit: microsec Type: average Base: user_read_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_latency Unit: microsec Type: average Base: user_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_reads","title":"aggr_disk_max_user_reads","text":"

      Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. aggr_disk_max_user_reads is the maximum of disk_user_reads for label aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_count Unit: per_sec Type: rate Base: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_reads Unit: per_sec Type: rate Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_write_blocks","title":"aggr_disk_max_user_write_blocks","text":"

      Number of blocks transferred for user write operations per second. aggr_disk_max_user_write_blocks is the maximum of disk_user_write_blocks for label aggr.

API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_block_count Unit: per_sec Type: rate Base: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_blocks Unit: per_sec Type: rate Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_write_chain","title":"aggr_disk_max_user_write_chain","text":"

      Average number of blocks transferred in each user write operation. aggr_disk_max_user_write_chain is the maximum of disk_user_write_chain for label aggr.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_chainUnit: noneType: averageBase: user_write_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_chainUnit: noneType: averageBase: user_writes conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_write_latency","title":"aggr_disk_max_user_write_latency","text":"

      Average latency per block in microseconds for user write operations. aggr_disk_max_user_write_latency is the maximum of disk_user_write_latency for label aggr.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_max_user_writes","title":"aggr_disk_max_user_writes","text":"

      Number of disk write operations initiated each second for storing data or metadata associated with user requests. aggr_disk_max_user_writes is the maximum of disk_user_writes for label aggr.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_writesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_total_data","title":"aggr_disk_total_data","text":"

      Total throughput for user operations per second. aggr_disk_total_data is disk_total_data aggregated by aggr.
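      A minimal Python sketch, assuming the roll-up sums the constituent disks of each aggregate (sample labels and values are illustrative, not taken from Harvest):

          from collections import defaultdict

          # per-disk samples of disk_total_data (bytes/sec), keyed by the disk's aggr label
          samples = [("aggr1", 1_000_000.0), ("aggr1", 2_500_000.0), ("aggr2", 750_000.0)]

          total_by_aggr = defaultdict(float)
          for aggr, value in samples:
              total_by_aggr[aggr] += value  # sum the aggregate's constituent disks

          print(dict(total_by_aggr))  # {'aggr1': 3500000.0, 'aggr2': 750000.0}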

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_total_transfers","title":"aggr_disk_total_transfers","text":"

      Total number of disk operations involving data transfer initiated per second. aggr_disk_total_transfers is disk_total_transfers aggregated by aggr.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_transfer_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_transfersUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_read_blocks","title":"aggr_disk_user_read_blocks","text":"

      Number of blocks transferred for user read operations per second. aggr_disk_user_read_blocks is disk_user_read_blocks aggregated by aggr.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_read_chain","title":"aggr_disk_user_read_chain","text":"

      Average number of blocks transferred in each user read operation. aggr_disk_user_read_chain is disk_user_read_chain aggregated by aggr.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_chainUnit: noneType: averageBase: user_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_chainUnit: noneType: averageBase: user_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_read_latency","title":"aggr_disk_user_read_latency","text":"

      Average latency per block in microseconds for user read operations. aggr_disk_user_read_latency is disk_user_read_latency aggregated by aggr.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_reads","title":"aggr_disk_user_reads","text":"

      Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. aggr_disk_user_reads is disk_user_reads aggregated by aggr.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_write_blocks","title":"aggr_disk_user_write_blocks","text":"

      Number of blocks transferred for user write operations per second. aggr_disk_user_write_blocks is disk_user_write_blocks aggregated by aggr.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_write_chain","title":"aggr_disk_user_write_chain","text":"

      Average number of blocks transferred in each user write operation. aggr_disk_user_write_chain is disk_user_write_chain aggregated by aggr.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_chainUnit: noneType: averageBase: user_write_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_chainUnit: noneType: averageBase: user_writes conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_write_latency","title":"aggr_disk_user_write_latency","text":"

      Average latency per block in microseconds for user write operations. aggr_disk_user_write_latency is disk_user_write_latency aggregated by aggr.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_disk_user_writes","title":"aggr_disk_user_writes","text":"

      Number of disk write operations initiated each second for storing data or metadata associated with user requests. aggr_disk_user_writes is disk_user_writes aggregated by aggr.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_writesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_efficiency_savings","title":"aggr_efficiency_savings","text":"

      Space saved by storage efficiencies (logical_used - used)
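      A worked example of the formula in the description; the byte values are illustrative only:

          # bytes, illustrative values
          logical_used = 500 * 1024**3   # logical data written by clients
          used         = 320 * 1024**3   # physical space consumed after efficiencies

          savings = logical_used - used  # space saved by storage efficiencies
          print(savings)                 # 193273528320 bytes (~180 GiB)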

      API Endpoint Metric Template REST api/storage/aggregates space.efficiency.savings conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_efficiency_savings_wo_snapshots","title":"aggr_efficiency_savings_wo_snapshots","text":"

      Space saved by storage efficiencies (logical_used - used)

      API Endpoint Metric Template REST api/storage/aggregates space.efficiency_without_snapshots.savings conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_efficiency_savings_wo_snapshots_flexclones","title":"aggr_efficiency_savings_wo_snapshots_flexclones","text":"

      Space saved by storage efficiencies (logical_used - used)

      API Endpoint Metric Template REST api/storage/aggregates space.efficiency_without_snapshots_flexclones.savings conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_hybrid_cache_size_total","title":"aggr_hybrid_cache_size_total","text":"

      Total usable space in bytes of SSD cache. Only provided when hybrid_cache.enabled is 'true'.

      API Endpoint Metric Template REST api/storage/aggregates block_storage.hybrid_cache.size conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.hybrid-cache-size-total conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_hybrid_disk_count","title":"aggr_hybrid_disk_count","text":"

      Number of disks used in the cache tier of the aggregate. Only provided when hybrid_cache.enabled is 'true'.

      API Endpoint Metric Template REST api/storage/aggregates block_storage.hybrid_cache.disk_count conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_files_private_used","title":"aggr_inode_files_private_used","text":"

      Number of system metadata files used. If the referenced file system is restricted or offline, a value of 0 is returned. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

      API Endpoint Metric Template REST api/storage/aggregates inode_attributes.files_private_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.files-private-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_files_total","title":"aggr_inode_files_total","text":"

      Maximum number of user-visible files that this referenced file system can currently hold. If the referenced file system is restricted or offline, a value of 0 is returned.

      API Endpoint Metric Template REST api/storage/aggregates inode_attributes.files_total conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.files-total conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_files_used","title":"aggr_inode_files_used","text":"

      Number of user-visible files used in the referenced file system. If the referenced file system is restricted or offline, a value of 0 is returned.

      API Endpoint Metric Template REST api/storage/aggregates inode_attributes.files_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.files-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_inodefile_private_capacity","title":"aggr_inode_inodefile_private_capacity","text":"

      Number of files that can currently be stored on disk for system metadata files. This number will dynamically increase as more system files are created. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

      API Endpoint Metric Template REST api/storage/aggregates inode_attributes.file_private_capacity conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.inodefile-private-capacity conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_inodefile_public_capacity","title":"aggr_inode_inodefile_public_capacity","text":"

      Number of files that can currently be stored on disk for user-visible files. This number will dynamically increase as more user-visible files are created. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

      API Endpoint Metric Template REST api/storage/aggregates inode_attributes.file_public_capacity conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.inodefile-public-capacity conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_maxfiles_available","title":"aggr_inode_maxfiles_available","text":"

      The count of the maximum number of user-visible files currently allowable on the referenced file system.

      API Endpoint Metric Template REST api/storage/aggregates inode_attributes.max_files_available conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.maxfiles-available conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_maxfiles_possible","title":"aggr_inode_maxfiles_possible","text":"

      The largest value to which the maxfiles-available parameter can be increased by reconfiguration, on the referenced file system.

      API Endpoint Metric Template REST api/storage/aggregates inode_attributes.max_files_possible conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.maxfiles-possible conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_maxfiles_used","title":"aggr_inode_maxfiles_used","text":"

      The number of user-visible files currently in use on the referenced file system.

      API Endpoint Metric Template REST api/storage/aggregates inode_attributes.max_files_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.maxfiles-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_inode_used_percent","title":"aggr_inode_used_percent","text":"

      The percentage of disk space currently in use based on user-visible file count on the referenced file system.

      API Endpoint Metric Template REST api/storage/aggregates inode_attributes.used_percent conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-inode-attributes.percent-inode-used-capacity conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_logical_used_wo_snapshots","title":"aggr_logical_used_wo_snapshots","text":"

      Logical used

      API Endpoint Metric Template REST api/storage/aggregates space.efficiency_without_snapshots.logical_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-efficiency-get-iter aggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-logical-used-wo-snapshots conf/zapi/cdot/9.9.0/aggr_efficiency.yaml"},{"location":"ontap-metrics/#aggr_logical_used_wo_snapshots_flexclones","title":"aggr_logical_used_wo_snapshots_flexclones","text":"

      Logical used

      API Endpoint Metric Template REST api/storage/aggregates space.efficiency_without_snapshots_flexclones.logical_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-efficiency-get-iter aggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-logical-used-wo-snapshots-flexclones conf/zapi/cdot/9.9.0/aggr_efficiency.yaml"},{"location":"ontap-metrics/#aggr_object_store_logical_used","title":"aggr_object_store_logical_used","text":"

      Logical space usage of aggregates in the attached object store.

      API Endpoint Metric Template REST api/private/cli/aggr/show-space object_store_logical_used conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_object_store_physical_used","title":"aggr_object_store_physical_used","text":"

      Physical space usage of aggregates in the attached object store.

      API Endpoint Metric Template REST api/private/cli/aggr/show-space object_store_physical_used conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_physical_used_wo_snapshots","title":"aggr_physical_used_wo_snapshots","text":"

      Total Data Reduction Physical Used Without Snapshots
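      A minimal sketch, assuming the REST value is derived from the two source fields listed below as logical used minus savings (byte values are illustrative):

          # bytes, illustrative values
          logical_used = 500 * 1024**3   # space.efficiency_without_snapshots.logical_used
          savings      = 180 * 1024**3   # space.efficiency_without_snapshots.savings

          physical_used = logical_used - savings
          print(physical_used)           # 343597383680 bytes (320 GiB)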

      API Endpoint Metric Template REST api/storage/aggregates space.efficiency_without_snapshots.logical_used, space.efficiency_without_snapshots.savings conf/rest/9.12.0/aggr.yaml ZAPI aggr-efficiency-get-iter aggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-physical-used-wo-snapshots conf/zapi/cdot/9.9.0/aggr_efficiency.yaml"},{"location":"ontap-metrics/#aggr_physical_used_wo_snapshots_flexclones","title":"aggr_physical_used_wo_snapshots_flexclones","text":"

      Total Data Reduction Physical Used without snapshots and flexclones

      API Endpoint Metric Template REST api/storage/aggregates space.efficiency_without_snapshots_flexclones.logical_used, space.efficiency_without_snapshots_flexclones.savings conf/rest/9.12.0/aggr.yaml ZAPI aggr-efficiency-get-iter aggr-efficiency-info.aggr-efficiency-cumulative-info.total-data-reduction-physical-used-wo-snapshots-flexclones conf/zapi/cdot/9.9.0/aggr_efficiency.yaml"},{"location":"ontap-metrics/#aggr_power","title":"aggr_power","text":"

      Power consumed by aggregate in Watts.

      API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#aggr_primary_disk_count","title":"aggr_primary_disk_count","text":"

      Number of disks used in the aggregate. This includes parity disks, but excludes disks in the hybrid cache.

      API Endpoint Metric Template REST api/storage/aggregates block_storage.primary.disk_count conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_raid_disk_count","title":"aggr_raid_disk_count","text":"

      Number of disks in the aggregate.
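      A minimal sketch, assuming the REST value is the sum of the two disk counts listed in the template below (the counts are illustrative):

          primary_disk_count      = 22  # block_storage.primary.disk_count
          hybrid_cache_disk_count = 2   # block_storage.hybrid_cache.disk_count (0 without a cache tier)

          raid_disk_count = primary_disk_count + hybrid_cache_disk_count
          print(raid_disk_count)        # 24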

      API Endpoint Metric Template REST api/storage/aggregates block_storage.primary.disk_count, block_storage.hybrid_cache.disk_count conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-raid-attributes.disk-count conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_raid_plex_count","title":"aggr_raid_plex_count","text":"

      Number of plexes in the aggregate

      API Endpoint Metric Template REST api/storage/aggregates block_storage.plexes.# conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-raid-attributes.plex-count conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_raid_size","title":"aggr_raid_size","text":"

      Option to specify the maximum number of disks that can be included in a RAID group.

      API Endpoint Metric Template REST api/storage/aggregates block_storage.primary.raid_size conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-raid-attributes.raid-size conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_files_total","title":"aggr_snapshot_files_total","text":"

      Total files allowed in Snapshot copies

      API Endpoint Metric Template REST api/storage/aggregates snapshot.files_total conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.files-total conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_files_used","title":"aggr_snapshot_files_used","text":"

      Total files created in Snapshot copies

      API Endpoint Metric Template REST api/storage/aggregates snapshot.files_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.files-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_inode_used_percent","title":"aggr_snapshot_inode_used_percent","text":"

      The percentage of disk space currently in use based on user-visible file (inode) count on the referenced file system.

      API Endpoint Metric Template ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.percent-inode-used-capacity conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_maxfiles_available","title":"aggr_snapshot_maxfiles_available","text":"

      Maximum files available for Snapshot copies

      API Endpoint Metric Template REST api/storage/aggregates snapshot.max_files_available conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.maxfiles-available conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_maxfiles_possible","title":"aggr_snapshot_maxfiles_possible","text":"

      The largest value to which the maxfiles-available parameter can be increased by reconfiguration, on the referenced file system.

      API Endpoint Metric Template REST api/storage/aggregates snapshot.max_files_available, snapshot.max_files_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.maxfiles-possible conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_maxfiles_used","title":"aggr_snapshot_maxfiles_used","text":"

      Files in use by Snapshot copies

      API Endpoint Metric Template REST api/storage/aggregates snapshot.max_files_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.maxfiles-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_reserve_percent","title":"aggr_snapshot_reserve_percent","text":"

      Percentage of space reserved for Snapshot copies

      API Endpoint Metric Template REST api/storage/aggregates space.snapshot.reserve_percent conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.snapshot-reserve-percent conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_size_available","title":"aggr_snapshot_size_available","text":"

      Available space for Snapshot copies in bytes

      API Endpoint Metric Template REST api/storage/aggregates space.snapshot.available conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.size-available conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_size_total","title":"aggr_snapshot_size_total","text":"

      Total space for Snapshot copies in bytes

      API Endpoint Metric Template REST api/storage/aggregates space.snapshot.total conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.size-total conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_size_used","title":"aggr_snapshot_size_used","text":"

      Space used by Snapshot copies in bytes

      API Endpoint Metric Template REST api/storage/aggregates space.snapshot.used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.size-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_snapshot_used_percent","title":"aggr_snapshot_used_percent","text":"

      Percentage of disk space used by Snapshot copies

      API Endpoint Metric Template REST api/storage/aggregates space.snapshot.used_percent conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-snapshot-attributes.percent-used-capacity conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_available","title":"aggr_space_available","text":"

      Space available in bytes.

      API Endpoint Metric Template REST api/storage/aggregates space.block_storage.available conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.size-available conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_capacity_tier_used","title":"aggr_space_capacity_tier_used","text":"

      Used space in bytes in the cloud store. Only applicable for aggregates with a cloud store tier.

      API Endpoint Metric Template REST api/storage/aggregates space.cloud_storage.used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.capacity-tier-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_data_compacted_count","title":"aggr_space_data_compacted_count","text":"

      Amount of compacted data in bytes.

      API Endpoint Metric Template REST api/storage/aggregates space.block_storage.data_compacted_count conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.data-compacted-count conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_data_compaction_saved","title":"aggr_space_data_compaction_saved","text":"

      Space saved in bytes by compacting the data.

      API Endpoint Metric Template REST api/storage/aggregates space.block_storage.data_compaction_space_saved conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.data-compaction-space-saved conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_data_compaction_saved_percent","title":"aggr_space_data_compaction_saved_percent","text":"

      Percentage saved by compacting the data.

      API Endpoint Metric Template REST api/storage/aggregates space.block_storage.data_compaction_space_saved_percent conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.data-compaction-space-saved-percent conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_performance_tier_inactive_user_data","title":"aggr_space_performance_tier_inactive_user_data","text":"

      The size that is physically used in the block storage and has a cold temperature, in bytes. This property is only supported if the aggregate is either attached to a cloud store or can be attached to a cloud store. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either block_storage.inactive_user_data or **.

      API Endpoint Metric Template REST api/storage/aggregates space.block_storage.inactive_user_data conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.performance-tier-inactive-user-data conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_performance_tier_inactive_user_data_percent","title":"aggr_space_performance_tier_inactive_user_data_percent","text":"

      The percentage of inactive user data in the block storage. This property is only supported if the aggregate is either attached to a cloud store or can be attached to a cloud store. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either block_storage.inactive_user_data_percent or **.

      API Endpoint Metric Template REST api/storage/aggregates space.block_storage.inactive_user_data_percent conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.performance-tier-inactive-user-data-percent conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_performance_tier_used","title":"aggr_space_performance_tier_used","text":"

      A summation of volume footprints (including volume guarantees), in bytes. This includes all of the volume footprints in the block_storage tier and the cloud_storage tier. This is an advanced property; there is an added computational cost to retrieving its value. The field is not populated for either a collection GET or an instance GET unless it is explicitly requested using the fields query parameter containing either footprint or **.

      API Endpoint Metric Template REST api/storage/aggregates space.footprint conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_performance_tier_used_percent","title":"aggr_space_performance_tier_used_percent","text":"

      A summation of volume footprints inside the aggregate, as a percentage. A volume's footprint is the amount of space being used for the volume in the aggregate.

      API Endpoint Metric Template REST api/storage/aggregates space.footprint_percent conf/rest/9.12.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_physical_used","title":"aggr_space_physical_used","text":"

      Total physical used size of an aggregate in bytes.

      API Endpoint Metric Template REST api/storage/aggregates space.block_storage.physical_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.physical-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_physical_used_percent","title":"aggr_space_physical_used_percent","text":"

      Physical used percentage.

      API Endpoint Metric Template REST api/storage/aggregates space.block_storage.physical_used_percent conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.physical-used-percent conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_reserved","title":"aggr_space_reserved","text":"

      The total disk space in bytes that is reserved on the referenced file system. The reserved space is already counted in the used space, so this element can be used to see what portion of the used space represents space reserved for future use.

      API Endpoint Metric Template ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.total-reserved-space conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_sis_saved","title":"aggr_space_sis_saved","text":"

      Amount of space saved in bytes by storage efficiency.

      API Endpoint Metric Template REST api/storage/aggregates space.block_storage.volume_deduplication_space_saved conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.sis-space-saved conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_sis_saved_percent","title":"aggr_space_sis_saved_percent","text":"

      Percentage of space saved by storage efficiency.

      API Endpoint Metric Template REST api/storage/aggregates space.block_storage.volume_deduplication_space_saved_percent conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.sis-space-saved-percent conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_sis_shared_count","title":"aggr_space_sis_shared_count","text":"

      Amount of shared bytes counted by storage efficiency.

      API Endpoint Metric Template REST api/storage/aggregates space.block_storage.volume_deduplication_shared_count conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.sis-shared-count conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_total","title":"aggr_space_total","text":"

      Total usable space in bytes, not including WAFL reserve and aggregate Snapshot copy reserve.

      API Endpoint Metric Template REST api/storage/aggregates space.block_storage.size conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.size-total conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_used","title":"aggr_space_used","text":"

      Space used or reserved in bytes. Includes volume guarantees and aggregate metadata.

      API Endpoint Metric Template REST api/storage/aggregates space.block_storage.used conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.size-used conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_space_used_percent","title":"aggr_space_used_percent","text":"

      The percentage of disk space currently in use on the referenced file system
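      A minimal sketch, assuming the percentage is derived from the two fields listed below as used divided by size (byte values are illustrative):

          used = 6 * 1024**4             # space.block_storage.used, bytes
          size = 10 * 1024**4            # space.block_storage.size, bytes

          used_percent = used / size * 100
          print(round(used_percent, 1))  # 60.0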

      API Endpoint Metric Template REST api/storage/aggregates space.block_storage.used, space.block_storage.size conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-space-attributes.percent-used-capacity conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#aggr_total_logical_used","title":"aggr_total_logical_used","text":"

      Logical used

      API Endpoint Metric Template REST api/storage/aggregates space.efficiency.logical_used conf/rest/9.12.0/aggr.yaml ZAPI aggr-efficiency-get-iter aggr-efficiency-info.aggr-efficiency-cumulative-info.total-logical-used conf/zapi/cdot/9.9.0/aggr_efficiency.yaml"},{"location":"ontap-metrics/#aggr_total_physical_used","title":"aggr_total_physical_used","text":"

      Total Physical Used

      API Endpoint Metric Template REST api/storage/aggregates space.efficiency.logical_used, space.efficiency.savings conf/rest/9.12.0/aggr.yaml ZAPI aggr-efficiency-get-iter aggr-efficiency-info.aggr-efficiency-cumulative-info.total-physical-used conf/zapi/cdot/9.9.0/aggr_efficiency.yaml"},{"location":"ontap-metrics/#aggr_volume_count","title":"aggr_volume_count","text":"

      The aggregate's volume count, which includes both FlexVols and FlexGroup constituents.

      API Endpoint Metric Template REST api/storage/aggregates volume_count conf/rest/9.12.0/aggr.yaml ZAPI aggr-get-iter aggr-attributes.aggr-volume-count-attributes.flexvol-count conf/zapi/cdot/9.8.0/aggr.yaml"},{"location":"ontap-metrics/#cifs_session_connection_count","title":"cifs_session_connection_count","text":"

      A counter used to track requests that are sent to the volumes on the node.

      API Endpoint Metric Template REST api/protocols/cifs/sessions connection_count conf/rest/9.8.0/cifs_session.yaml ZAPI cifs-session-get-iter cifs-session.connection-count conf/zapi/cdot/9.8.0/cifs_session.yaml"},{"location":"ontap-metrics/#cloud_target_used","title":"cloud_target_used","text":"

      The amount of cloud space used by all the aggregates attached to the target, in bytes. This field is only populated for FabricPool targets. The value is recalculated once every 5 minutes.

      API Endpoint Metric Template REST api/cloud/targets used conf/rest/9.12.0/cloud_target.yaml ZAPI aggr-object-store-config-get-iter aggr-object-store-config-info.used-space conf/zapi/cdot/9.10.0/aggr_object_store_config.yaml"},{"location":"ontap-metrics/#cluster_new_status","title":"cluster_new_status","text":"

      It is an indicator of the overall health status of the cluster, with a value of 1 indicating a healthy status and a value of 0 indicating an unhealthy status.

      API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/status.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/status.yaml"},{"location":"ontap-metrics/#cluster_subsystem_outstanding_alerts","title":"cluster_subsystem_outstanding_alerts","text":"

      Number of outstanding alerts

      API Endpoint Metric Template REST api/private/cli/system/health/subsystem outstanding_alert_count conf/rest/9.12.0/subsystem.yaml ZAPI diagnosis-subsystem-config-get-iter diagnosis-subsystem-config-info.outstanding-alert-count conf/zapi/cdot/9.8.0/subsystem.yaml"},{"location":"ontap-metrics/#cluster_subsystem_suppressed_alerts","title":"cluster_subsystem_suppressed_alerts","text":"

      Number of suppressed alerts

      API Endpoint Metric Template REST api/private/cli/system/health/subsystem suppressed_alert_count conf/rest/9.12.0/subsystem.yaml ZAPI diagnosis-subsystem-config-get-iter diagnosis-subsystem-config-info.suppressed-alert-count conf/zapi/cdot/9.8.0/subsystem.yaml"},{"location":"ontap-metrics/#copy_manager_bce_copy_count_curr","title":"copy_manager_bce_copy_count_curr","text":"

      Current number of copy requests being processed by the Block Copy Engine.

      API Endpoint Metric Template REST api/cluster/counter/tables/copy_manager block_copy_engine_current_copy_countUnit: noneType: deltaBase: conf/restperf/9.12.0/copy_manager.yaml ZAPI perf-object-get-instances copy_manager bce_copy_count_currUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/copy_manager.yaml"},{"location":"ontap-metrics/#copy_manager_kb_copied","title":"copy_manager_kb_copied","text":"

      Sum of kilo-bytes copied.

      API Endpoint Metric Template REST api/cluster/counter/tables/copy_manager KB_copiedUnit: noneType: deltaBase: conf/restperf/9.12.0/copy_manager.yaml ZAPI perf-object-get-instances copy_manager KB_copiedUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/copy_manager.yaml"},{"location":"ontap-metrics/#copy_manager_ocs_copy_count_curr","title":"copy_manager_ocs_copy_count_curr","text":"

      Current number of copy requests being processed by the ONTAP copy subsystem.

      API Endpoint Metric Template REST api/cluster/counter/tables/copy_manager ontap_copy_subsystem_current_copy_countUnit: noneType: deltaBase: conf/restperf/9.12.0/copy_manager.yaml ZAPI perf-object-get-instances copy_manager ocs_copy_count_currUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/copy_manager.yaml"},{"location":"ontap-metrics/#copy_manager_sce_copy_count_curr","title":"copy_manager_sce_copy_count_curr","text":"

      Current number of copy requests being processed by the System Continuous Engineering.

      API Endpoint Metric Template REST api/cluster/counter/tables/copy_manager system_continuous_engineering_current_copy_countUnit: noneType: deltaBase: conf/restperf/9.12.0/copy_manager.yaml ZAPI perf-object-get-instances copy_manager sce_copy_count_currUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/copy_manager.yaml"},{"location":"ontap-metrics/#copy_manager_spince_copy_count_curr","title":"copy_manager_spince_copy_count_curr","text":"

      Current number of copy requests being processed by the SpinCE.

      API Endpoint Metric Template REST api/cluster/counter/tables/copy_manager spince_current_copy_countUnit: noneType: deltaBase: conf/restperf/9.12.0/copy_manager.yaml ZAPI perf-object-get-instances copy_manager spince_copy_count_currUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/copy_manager.yaml"},{"location":"ontap-metrics/#disk_busy","title":"disk_busy","text":"

      The utilization percent of the disk
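      A minimal sketch of a percent-type counter, assuming the usual ONTAP perf convention that the value is the delta of the counter divided by the delta of its base counter (base_for_disk_busy) over a polling interval, times 100 (values are illustrative):

          # two consecutive polls of the raw counters (illustrative values)
          busy_prev, busy_curr = 1_000_000, 1_450_000    # disk busy ticks
          base_prev, base_curr = 10_000_000, 11_000_000  # base_for_disk_busy ticks

          disk_busy_percent = (busy_curr - busy_prev) / (base_curr - base_prev) * 100
          print(disk_busy_percent)  # 45.0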

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent disk_busy_percentUnit: percentType: percentBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_busyUnit: percentType: percentBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_bytes_per_sector","title":"disk_bytes_per_sector","text":"

      Bytes per sector.

      API Endpoint Metric Template REST api/storage/disks bytes_per_sector conf/rest/9.12.0/disk.yaml ZAPI storage-disk-get-iter storage-disk-info.disk-inventory-info.bytes-per-sector conf/zapi/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_capacity","title":"disk_capacity","text":"

      Disk capacity in MB

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent capacityUnit: mbType: rawBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_capacityUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_cp_read_chain","title":"disk_cp_read_chain","text":"

      Average number of blocks transferred in each consistency point read operation during a CP

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_chainUnit: noneType: averageBase: cp_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_chainUnit: noneType: averageBase: cp_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_cp_read_latency","title":"disk_cp_read_latency","text":"

      Average latency per block in microseconds for consistency point read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_cp_reads","title":"disk_cp_reads","text":"

      Number of disk read operations initiated each second for consistency point processing

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_io_pending","title":"disk_io_pending","text":"

      Average number of I/Os issued to the disk for which we have not yet received the response

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_io_queued","title":"disk_io_queued","text":"

      Number of I/Os queued to the disk but not yet issued

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_power_on_hours","title":"disk_power_on_hours","text":"

      Hours powered on.

      API Endpoint Metric Template REST api/storage/disks stats.power_on_hours conf/rest/9.12.0/disk.yaml"},{"location":"ontap-metrics/#disk_sectors","title":"disk_sectors","text":"

      Number of sectors on the disk.

      API Endpoint Metric Template REST api/storage/disks sector_count conf/rest/9.12.0/disk.yaml ZAPI storage-disk-get-iter storage-disk-info.disk-inventory-info.capacity-sectors conf/zapi/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_stats_average_latency","title":"disk_stats_average_latency","text":"

      Average I/O latency across all active paths, in milliseconds.

      API Endpoint Metric Template REST api/storage/disks stats.average_latency conf/rest/9.12.0/disk.yaml ZAPI storage-disk-get-iter storage-disk-info.disk-stats-info.average-latency conf/zapi/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_stats_io_kbps","title":"disk_stats_io_kbps","text":"

      Total Disk Throughput in KBPS Across All Active Paths

      API Endpoint Metric Template REST api/private/cli/disk disk_io_kbps_total conf/rest/9.12.0/disk.yaml ZAPI storage-disk-get-iter storage-disk-info.disk-stats-info.disk-io-kbps conf/zapi/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_stats_sectors_read","title":"disk_stats_sectors_read","text":"

      Number of Sectors Read

      API Endpoint Metric Template REST api/private/cli/disk sectors_read conf/rest/9.12.0/disk.yaml ZAPI storage-disk-get-iter storage-disk-info.disk-stats-info.sectors-read conf/zapi/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_stats_sectors_written","title":"disk_stats_sectors_written","text":"

      Number of Sectors Written

      API Endpoint Metric Template REST api/private/cli/disk sectors_written conf/rest/9.12.0/disk.yaml ZAPI storage-disk-get-iter storage-disk-info.disk-stats-info.sectors-written conf/zapi/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_total_data","title":"disk_total_data","text":"

      Total throughput for user operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_total_transfers","title":"disk_total_transfers","text":"

      Total number of disk operations involving data transfer initiated per second

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_transfer_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_transfersUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_uptime","title":"disk_uptime","text":"

      Number of seconds the drive has been powered on
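      A worked example, assuming the REST value is the listed power-on hours converted to seconds (hours x 60 x 60); the hours figure is illustrative:

          power_on_hours = 8760                          # stats.power_on_hours (one year powered on)
          disk_uptime_seconds = power_on_hours * 60 * 60
          print(disk_uptime_seconds)                     # 31536000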

      API Endpoint Metric Template REST api/storage/disks stats.power_on_hours, 60, 60 conf/rest/9.12.0/disk.yaml ZAPI storage-disk-get-iter storage-disk-info.disk-stats-info.power-on-time-interval conf/zapi/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_usable_size","title":"disk_usable_size","text":"

      Usable size of each disk, in bytes.

      API Endpoint Metric Template REST api/storage/disks usable_size conf/rest/9.12.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_read_blocks","title":"disk_user_read_blocks","text":"

      Number of blocks transferred for user read operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_read_chain","title":"disk_user_read_chain","text":"

      Average number of blocks transferred in each user read operation
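      A minimal sketch of an average-type counter, assuming the usual ONTAP perf convention that the value is the delta of the counter divided by the delta of its base counter (user_read_count / user_reads) over a polling interval (values are illustrative):

          # two consecutive polls of the raw counters (illustrative values)
          chain_prev, chain_curr = 400_000, 520_000  # user_read_chain (blocks, cumulative)
          reads_prev, reads_curr = 50_000, 65_000    # user_read_count (base, cumulative)

          avg_blocks_per_read = (chain_curr - chain_prev) / (reads_curr - reads_prev)
          print(avg_blocks_per_read)  # 8.0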

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_chainUnit: noneType: averageBase: user_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_chainUnit: noneType: averageBase: user_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_read_latency","title":"disk_user_read_latency","text":"

      Average latency per block in microseconds for user read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_reads","title":"disk_user_reads","text":"

      Number of disk read operations initiated each second for retrieving data or metadata associated with user requests

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_write_blocks","title":"disk_user_write_blocks","text":"

      Number of blocks transferred for user write operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_write_chain","title":"disk_user_write_chain","text":"

      Average number of blocks transferred in each user write operation

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_chainUnit: noneType: averageBase: user_write_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_chainUnit: noneType: averageBase: user_writes conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_write_latency","title":"disk_user_write_latency","text":"

      Average latency per block in microseconds for user write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#disk_user_writes","title":"disk_user_writes","text":"

      Number of disk write operations initiated each second for storing data or metadata associated with user requests

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_writesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#environment_sensor_average_ambient_temperature","title":"environment_sensor_average_ambient_temperature","text":"

      Average temperature of all ambient sensors for node in Celsius.
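      A minimal sketch, assuming the Harvest-generated value is a simple arithmetic mean of the node's ambient temperature sensors (the readings are illustrative):

          # per-sensor ambient temperature readings for one node, in Celsius (illustrative)
          ambient_readings = [21.0, 22.5, 20.5]

          average_ambient = sum(ambient_readings) / len(ambient_readings)
          print(round(average_ambient, 1))  # 21.3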

      API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_average_fan_speed","title":"environment_sensor_average_fan_speed","text":"

      Average fan speed for node in rpm.

      API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_average_temperature","title":"environment_sensor_average_temperature","text":"

      Average temperature of all non-ambient sensors for node in Celsius.

      API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_max_fan_speed","title":"environment_sensor_max_fan_speed","text":"

      Maximum fan speed for node in rpm.

      API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_max_temperature","title":"environment_sensor_max_temperature","text":"

      Maximum temperature of all non-ambient sensors for node in Celsius.

      API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_min_ambient_temperature","title":"environment_sensor_min_ambient_temperature","text":"

      Minimum temperature of all ambient sensors for node in Celsius.

      API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_min_fan_speed","title":"environment_sensor_min_fan_speed","text":"

      Minimum fan speed for node in rpm.

      API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_min_temperature","title":"environment_sensor_min_temperature","text":"

      Minimum temperature of all non-ambient sensors for node in Celsius.

      API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_power","title":"environment_sensor_power","text":"

      Power consumed by a node in Watts.

      API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/sensor.yaml ZAPI NA Harvest generated conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#environment_sensor_threshold_value","title":"environment_sensor_threshold_value","text":"

      Provides the sensor reading.

      API Endpoint Metric Template REST api/cluster/sensors value conf/rest/9.12.0/sensor.yaml ZAPI environment-sensors-get-iter environment-sensors-info.threshold-sensor-value conf/zapi/cdot/9.8.0/sensor.yaml"},{"location":"ontap-metrics/#external_service_op_num_not_found_responses","title":"external_service_op_num_not_found_responses","text":"

      Number of 'Not Found' responses for calls to this operation.

      API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op num_not_found_responsesUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#external_service_op_num_request_failures","title":"external_service_op_num_request_failures","text":"

      A cumulative count of all request failures.

      API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op num_request_failuresUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#external_service_op_num_requests_sent","title":"external_service_op_num_requests_sent","text":"

      Number of requests sent to this service.

      API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op num_requests_sentUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#external_service_op_num_responses_received","title":"external_service_op_num_responses_received","text":"

      Number of responses received from the server (does not include timeouts).

      API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op num_responses_receivedUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#external_service_op_num_successful_responses","title":"external_service_op_num_successful_responses","text":"

      Number of successful responses to this operation.

      API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op num_successful_responsesUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#external_service_op_num_timeouts","title":"external_service_op_num_timeouts","text":"

      Number of times requests to the server for this operation timed out, meaning no response was received in a given time period.

      API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op num_timeoutsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#external_service_op_request_latency","title":"external_service_op_request_latency","text":"

      Average latency of requests for operations of this type on this server.

      API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op request_latencyUnit: microsecType: averageBase: num_requests_sent conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#external_service_op_request_latency_hist","title":"external_service_op_request_latency_hist","text":"

      This histogram holds the latency values for requests of this operation to the specified server.

      API Endpoint Metric Template ZAPI perf-object-get-instances external_service_op request_latency_histUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/external_service_operation.yaml"},{"location":"ontap-metrics/#fabricpool_average_latency","title":"fabricpool_average_latency","text":"

      This counter is deprecated. Average latencies measured during various phases of command execution. The execution-start latency represents the average time taken to start executing an operation. The request-prepare latency represents the average time taken to prepare the complete request that needs to be sent to the server. The send latency represents the average time taken to send requests to the server. The execution-start-to-send-complete latency represents the average time taken to send an operation out since its execution started. The execution-start-to-first-byte-received latency represents the average time taken to receive the first byte of a response since the command's request execution started. These counters can be used to identify performance bottlenecks within the object store client module.

      API Endpoint Metric Template ZAPI perf-object-get-instances object_store_client_op average_latencyUnit: Type: Base: conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml"},{"location":"ontap-metrics/#fabricpool_cloud_bin_op_latency_average","title":"fabricpool_cloud_bin_op_latency_average","text":"

      Cloud bin operation latency average in milliseconds.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_comp_aggr_vol_bin cloud_bin_op_latency_averageUnit: millisecType: rawBase: conf/restperf/9.12.0/wafl_comp_aggr_vol_bin.yaml ZAPI perf-object-get-instances wafl_comp_aggr_vol_bin cloud_bin_op_latency_averageUnit: millisecType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/wafl_comp_aggr_vol_bin.yaml"},{"location":"ontap-metrics/#fabricpool_cloud_bin_operation","title":"fabricpool_cloud_bin_operation","text":"

      Cloud bin operation counters.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_comp_aggr_vol_bin cloud_bin_opUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl_comp_aggr_vol_bin.yaml ZAPI perf-object-get-instances wafl_comp_aggr_vol_bin cloud_bin_operationUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl_comp_aggr_vol_bin.yaml"},{"location":"ontap-metrics/#fabricpool_get_throughput_bytes","title":"fabricpool_get_throughput_bytes","text":"

      This counter is deprecated. Counter that indicates the throughput for the GET command in bytes per second.

      API Endpoint Metric Template ZAPI perf-object-get-instances object_store_client_op get_throughput_bytesUnit: Type: Base: conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml"},{"location":"ontap-metrics/#fabricpool_put_throughput_bytes","title":"fabricpool_put_throughput_bytes","text":"

      This counter is deprecated. Counter that indicates the throughput for the PUT command in bytes per second.

      API Endpoint Metric Template ZAPI perf-object-get-instances object_store_client_op put_throughput_bytesUnit: Type: Base: conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml"},{"location":"ontap-metrics/#fabricpool_stats","title":"fabricpool_stats","text":"

      This counter is deprecated. Counter that indicates the number of object store operations sent, and their success and failure counts. The objstore_client_op_name array indicates the operation name, such as PUT, GET, etc. The objstore_client_op_stats_name array contains the total number of operations, and their success and failure counters, for each operation.

      API Endpoint Metric Template ZAPI perf-object-get-instances object_store_client_op statsUnit: Type: Base: conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml"},{"location":"ontap-metrics/#fabricpool_throughput_ops","title":"fabricpool_throughput_ops","text":"

      Counter that indicates the throughput for commands in ops per second.
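      This is a rate-type counter: the raw value is a cumulative operation count, and the exported ops-per-second figure comes from dividing the delta between two polls by the elapsed time. A minimal, hedged sketch with illustrative names (not Harvest source code):

      ```python
      # Sketch: turn two raw samples of a cumulative op counter into ops/sec.
      def cook_rate(count_t0: int, count_t1: int, seconds_elapsed: float) -> float:
          if seconds_elapsed <= 0:
              return 0.0
          return (count_t1 - count_t0) / seconds_elapsed

      # Example: 6,000 additional ops over a 60-second poll interval -> 100 ops/sec
      ops_per_sec = cook_rate(120_000, 126_000, 60.0)
      ```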

      API Endpoint Metric Template ZAPI perf-object-get-instances object_store_client_op throughput_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/object_store_client_op.yaml"},{"location":"ontap-metrics/#fcp_avg_other_latency","title":"fcp_avg_other_latency","text":"

      Average latency for operations other than read and write

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp average_other_latencyUnit: microsecType: averageBase: other_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port avg_other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_avg_read_latency","title":"fcp_avg_read_latency","text":"

      Average latency for read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp average_read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port avg_read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_avg_write_latency","title":"fcp_avg_write_latency","text":"

      Average latency for write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp average_write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port avg_write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_discarded_frames_count","title":"fcp_discarded_frames_count","text":"

      Number of discarded frames.
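      As a delta-type counter, this metric reports the increase between two consecutive polls (counter_t1 - counter_t0) rather than a per-second rate, so a value of 0 simply means no new discarded frames were observed during the interval.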

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp discarded_frames_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port discarded_frames_countUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_fabric_connected_speed","title":"fcp_fabric_connected_speed","text":"

      The negotiated data rate between the target FC port and the fabric in gigabits per second.

      API Endpoint Metric Template REST api/network/fc/ports fabric.connected_speed conf/rest/9.6.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_int_count","title":"fcp_int_count","text":"

      Number of interrupts

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp interrupt_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port int_countUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_invalid_crc","title":"fcp_invalid_crc","text":"

      Number of invalid cyclic redundancy checks (CRC count)

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp invalid.crcUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port invalid_crcUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_invalid_transmission_word","title":"fcp_invalid_transmission_word","text":"

      Number of invalid transmission words

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp invalid.transmission_wordUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port invalid_transmission_wordUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_isr_count","title":"fcp_isr_count","text":"

      Number of interrupt responses

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp isr.countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port isr_countUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_lif_avg_latency","title":"fcp_lif_avg_latency","text":"

      Average latency for FCP operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif average_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif avg_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_avg_other_latency","title":"fcp_lif_avg_other_latency","text":"

      Average latency for operations other than read and write

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif average_other_latencyUnit: microsecType: averageBase: other_ops conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif avg_other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_avg_read_latency","title":"fcp_lif_avg_read_latency","text":"

      Average latency for read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif average_read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif avg_read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_avg_write_latency","title":"fcp_lif_avg_write_latency","text":"

      Average latency for write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif average_write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif avg_write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_other_ops","title":"fcp_lif_other_ops","text":"

      Number of operations that are not read or write.

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_read_data","title":"fcp_lif_read_data","text":"

      Amount of data read from the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_read_ops","title":"fcp_lif_read_ops","text":"

      Number of read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_total_ops","title":"fcp_lif_total_ops","text":"

      Total number of operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_write_data","title":"fcp_lif_write_data","text":"

      Amount of data written to the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_lif_write_ops","title":"fcp_lif_write_ops","text":"

      Number of write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp_lif write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp_lif.yaml ZAPI perf-object-get-instances fcp_lif write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp_lif.yaml"},{"location":"ontap-metrics/#fcp_link_down","title":"fcp_link_down","text":"

      Number of times the Fibre Channel link was lost

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp link.downUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port link_downUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_link_failure","title":"fcp_link_failure","text":"

      Number of link failures

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp link_failureUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port link_failureUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_link_up","title":"fcp_link_up","text":"

      Number of times the Fibre Channel link was established

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp link.upUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port link_upUnit: noneType: deltaBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_loss_of_signal","title":"fcp_loss_of_signal","text":"

      Number of times this port lost signal

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp loss_of_signalUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port loss_of_signalUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_loss_of_sync","title":"fcp_loss_of_sync","text":"

      Number of times this port lost sync

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp loss_of_syncUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port loss_of_syncUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_max_speed","title":"fcp_max_speed","text":"

      The maximum speed supported by the FC port in gigabits per second.

      API Endpoint Metric Template REST api/network/fc/ports speed.maximum conf/rest/9.6.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_avg_other_latency","title":"fcp_nvmf_avg_other_latency","text":"

      Average latency for operations other than read and write (FC-NVMe)

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.average_other_latencyUnit: microsecType: averageBase: nvmf.other_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_avg_other_latencyUnit: microsecType: averageBase: nvmf_other_ops conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_avg_read_latency","title":"fcp_nvmf_avg_read_latency","text":"

      Average latency for read operations (FC-NVMe)

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.average_read_latencyUnit: microsecType: averageBase: nvmf.read_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_avg_read_latencyUnit: microsecType: averageBase: nvmf_read_ops conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_avg_remote_other_latency","title":"fcp_nvmf_avg_remote_other_latency","text":"

      Average latency for remote operations other than read and write (FC-NVMe)

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.average_remote_other_latencyUnit: microsecType: averageBase: nvmf_remote.other_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_avg_remote_other_latencyUnit: microsecType: averageBase: nvmf_remote_other_ops conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_avg_remote_read_latency","title":"fcp_nvmf_avg_remote_read_latency","text":"

      Average latency for remote read operations (FC-NVMe)

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.average_remote_read_latencyUnit: microsecType: averageBase: nvmf_remote.read_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_avg_remote_read_latencyUnit: microsecType: averageBase: nvmf_remote_read_ops conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_avg_remote_write_latency","title":"fcp_nvmf_avg_remote_write_latency","text":"

      Average latency for remote write operations (FC-NVMe)

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.average_remote_write_latencyUnit: microsecType: averageBase: nvmf_remote.write_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_avg_remote_write_latencyUnit: microsecType: averageBase: nvmf_remote_write_ops conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_avg_write_latency","title":"fcp_nvmf_avg_write_latency","text":"

      Average latency for write operations (FC-NVMe)

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.average_write_latencyUnit: microsecType: averageBase: nvmf.write_ops conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_avg_write_latencyUnit: microsecType: averageBase: nvmf_write_ops conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_caw_data","title":"fcp_nvmf_caw_data","text":"

      Amount of CAW data sent to the storage system (FC-NVMe)

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.caw_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_caw_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_caw_ops","title":"fcp_nvmf_caw_ops","text":"

      Number of FC-NVMe CAW operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.caw_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_caw_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_command_slots","title":"fcp_nvmf_command_slots","text":"

      Number of command slots that have been used by initiators logging into this port. This shows the command fan-in on the port.

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.command_slotsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_command_slotsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_other_ops","title":"fcp_nvmf_other_ops","text":"

      Number of NVMF operations that are not read or write.

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_read_data","title":"fcp_nvmf_read_data","text":"

      Amount of data read from the storage system (FC-NVMe)

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_read_ops","title":"fcp_nvmf_read_ops","text":"

      Number of FC-NVMe read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_caw_data","title":"fcp_nvmf_remote_caw_data","text":"

      Amount of remote CAW data sent to the storage system (FC-NVMe)

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.caw_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_caw_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_caw_ops","title":"fcp_nvmf_remote_caw_ops","text":"

      Number of FC-NVMe remote CAW operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.caw_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_caw_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_other_ops","title":"fcp_nvmf_remote_other_ops","text":"

      Number of NVMF remote operations that are not read or write.

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_read_data","title":"fcp_nvmf_remote_read_data","text":"

      Amount of remote data read from the storage system (FC-NVMe)

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_read_ops","title":"fcp_nvmf_remote_read_ops","text":"

      Number of FC-NVMe remote read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_total_data","title":"fcp_nvmf_remote_total_data","text":"

      Amount of remote FC-NVMe traffic to and from the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_total_ops","title":"fcp_nvmf_remote_total_ops","text":"

      Total number of remote FC-NVMe operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_write_data","title":"fcp_nvmf_remote_write_data","text":"

      Amount of remote data written to the storage system (FC-NVMe)

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_remote_write_ops","title":"fcp_nvmf_remote_write_ops","text":"

      Number of FC-NVMe remote write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf_remote.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_remote_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_total_data","title":"fcp_nvmf_total_data","text":"

      Amount of FC-NVMe traffic to and from the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_total_ops","title":"fcp_nvmf_total_ops","text":"

      Total number of FC-NVMe operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_write_data","title":"fcp_nvmf_write_data","text":"

      Amount of data written to the storage system (FC-NVMe)

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_nvmf_write_ops","title":"fcp_nvmf_write_ops","text":"

      Number of FC-NVMe write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp nvmf.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port nvmf_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/fcp.yaml"},{"location":"ontap-metrics/#fcp_other_ops","title":"fcp_other_ops","text":"

      Number of operations that are not read or write.

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_prim_seq_err","title":"fcp_prim_seq_err","text":"

      Number of primitive sequence errors

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp primitive_seq_errUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port prim_seq_errUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_queue_full","title":"fcp_queue_full","text":"

      Number of times a queue full condition occurred.

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp queue_fullUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port queue_fullUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_read_data","title":"fcp_read_data","text":"

      Amount of data read from the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_read_ops","title":"fcp_read_ops","text":"

      Number of read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_reset_count","title":"fcp_reset_count","text":"

      Number of physical port resets

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp reset_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port reset_countUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_shared_int_count","title":"fcp_shared_int_count","text":"

      Number of shared interrupts

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp shared_interrupt_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port shared_int_countUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_spurious_int_count","title":"fcp_spurious_int_count","text":"

      Number of spurious interrupts

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp spurious_interrupt_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port spurious_int_countUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_threshold_full","title":"fcp_threshold_full","text":"

      Number of times the total number of outstanding commands on the port exceeded the threshold supported by this port.

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp threshold_fullUnit: noneType: deltaBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port threshold_fullUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_total_data","title":"fcp_total_data","text":"

      Amount of FCP traffic to and from the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_total_ops","title":"fcp_total_ops","text":"

      Total number of FCP operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_write_data","title":"fcp_write_data","text":"

      Amount of data written to the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcp_write_ops","title":"fcp_write_ops","text":"

      Number of write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/fcp write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/fcp.yaml ZAPI perf-object-get-instances fcp_port write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcp.yaml"},{"location":"ontap-metrics/#fcvi_firmware_invalid_crc_count","title":"fcvi_firmware_invalid_crc_count","text":"

      Firmware reported invalid CRC count

      API Endpoint Metric Template REST api/cluster/counter/tables/fcvi firmware.invalid_crc_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi fw_invalid_crcUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_firmware_invalid_transmit_word_count","title":"fcvi_firmware_invalid_transmit_word_count","text":"

      Firmware reported invalid transmit word count

      API Endpoint Metric Template REST api/cluster/counter/tables/fcvi firmware.invalid_transmit_word_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi fw_invalid_xmit_wordsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_firmware_link_failure_count","title":"fcvi_firmware_link_failure_count","text":"

      Firmware reported link failure count

      API Endpoint Metric Template REST api/cluster/counter/tables/fcvi firmware.link_failure_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi fw_link_failureUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_firmware_loss_of_signal_count","title":"fcvi_firmware_loss_of_signal_count","text":"

      Firmware reported loss of signal count

      API Endpoint Metric Template REST api/cluster/counter/tables/fcvi firmware.loss_of_signal_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi fw_loss_of_signalUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_firmware_loss_of_sync_count","title":"fcvi_firmware_loss_of_sync_count","text":"

      Firmware reported loss of sync count

      API Endpoint Metric Template REST api/cluster/counter/tables/fcvi firmware.loss_of_sync_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi fw_loss_of_syncUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_firmware_systat_discard_frames","title":"fcvi_firmware_systat_discard_frames","text":"

      Firmware reported SyStatDiscardFrames value

      API Endpoint Metric Template REST api/cluster/counter/tables/fcvi firmware.systat.discard_framesUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi fw_SyStatDiscardFramesUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_hard_reset_count","title":"fcvi_hard_reset_count","text":"

      Number of times a hard reset of the FCVI adapter was issued.

      API Endpoint Metric Template REST api/cluster/counter/tables/fcvi hard_reset_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi hard_reset_cntUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_rdma_write_avg_latency","title":"fcvi_rdma_write_avg_latency","text":"

      Average RDMA write I/O latency.

      API Endpoint Metric Template REST api/cluster/counter/tables/fcvi rdma.write_average_latencyUnit: microsecType: averageBase: rdma.write_ops conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi rdma_write_avg_latencyUnit: microsecType: averageBase: rdma_write_ops conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_rdma_write_ops","title":"fcvi_rdma_write_ops","text":"

      Number of RDMA write I/Os issued per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/fcvi rdma.write_opsUnit: noneType: rateBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi rdma_write_opsUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_rdma_write_throughput","title":"fcvi_rdma_write_throughput","text":"

      RDMA write throughput in bytes per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/fcvi rdma.write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi rdma_write_throughputUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#fcvi_soft_reset_count","title":"fcvi_soft_reset_count","text":"

      Number of times a soft reset of the FCVI adapter was issued.

      API Endpoint Metric Template REST api/cluster/counter/tables/fcvi soft_reset_countUnit: noneType: deltaBase: conf/restperf/9.12.0/fcvi.yaml ZAPI perf-object-get-instances fcvi soft_reset_cntUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/fcvi.yaml"},{"location":"ontap-metrics/#flashcache_accesses","title":"flashcache_accesses","text":"

      External cache accesses per second

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache accessesUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj accessesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_disk_reads_replaced","title":"flashcache_disk_reads_replaced","text":"

      Estimated number of disk reads per second replaced by cache

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache disk_reads_replacedUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj disk_reads_replacedUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_evicts","title":"flashcache_evicts","text":"

      Number of blocks evicted from the external cache to make room for new blocks

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache evictsUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj evictsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_hit","title":"flashcache_hit","text":"

      Number of WAFL buffers served off the external cache

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache hit.totalUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj hitUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_hit_directory","title":"flashcache_hit_directory","text":"

      Number of directory buffers served off the external cache

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache hit.directoryUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj hit_directoryUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_hit_indirect","title":"flashcache_hit_indirect","text":"

      Number of indirect file buffers served off the external cache

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache hit.indirectUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj hit_indirectUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_hit_metadata_file","title":"flashcache_hit_metadata_file","text":"

      Number of metadata file buffers served off the external cache

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache hit.metadata_fileUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj hit_metadata_fileUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_hit_normal_lev0","title":"flashcache_hit_normal_lev0","text":"

      Number of normal level 0 WAFL buffers served off the external cache

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache hit.normal_level_zeroUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj hit_normal_lev0Unit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_hit_percent","title":"flashcache_hit_percent","text":"

      External cache hit rate

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache hit.percentUnit: percentType: averageBase: accesses conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj hit_percentUnit: percentType: percentBase: accesses conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_inserts","title":"flashcache_inserts","text":"

      Number of WAFL buffers inserted into the external cache

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache insertsUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj insertsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_invalidates","title":"flashcache_invalidates","text":"

      Number of blocks invalidated in the external cache

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache invalidatesUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj invalidatesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_miss","title":"flashcache_miss","text":"

      External cache misses

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache miss.totalUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj missUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_miss_directory","title":"flashcache_miss_directory","text":"

      External cache misses accessing directory buffers

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache miss.directoryUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj miss_directoryUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_miss_indirect","title":"flashcache_miss_indirect","text":"

      External cache misses accessing indirect file buffers

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache miss.indirectUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj miss_indirectUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_miss_metadata_file","title":"flashcache_miss_metadata_file","text":"

      External cache misses accessing metadata file buffers

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache miss.metadata_fileUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj miss_metadata_fileUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_miss_normal_lev0","title":"flashcache_miss_normal_lev0","text":"

      External cache misses accessing normal level 0 buffers

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache miss.normal_level_zeroUnit: per_secType: rateBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj miss_normal_lev0Unit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashcache_usage","title":"flashcache_usage","text":"

      Percentage of blocks in external cache currently containing valid data

      API Endpoint Metric Template REST api/cluster/counter/tables/external_cache usageUnit: percentType: rawBase: conf/restperf/9.12.0/ext_cache_obj.yaml ZAPI perf-object-get-instances ext_cache_obj usageUnit: percentType: rawBase: conf/zapiperf/cdot/9.8.0/ext_cache_obj.yaml"},{"location":"ontap-metrics/#flashpool_cache_stats","title":"flashpool_cache_stats","text":"

      Automated Working-set Analyzer (AWA) per-interval pseudo cache statistics for the most recent intervals. The number of intervals defined as recent is CM_WAFL_HYAS_INT_DIS_CNT. This array is a table with fields corresponding to the enum type of hyas_cache_stat_type_t.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_sizer cache_statsUnit: noneType: rawBase: conf/restperf/9.12.0/wafl_hya_sizer.yaml ZAPI perf-object-get-instances wafl_hya_sizer cache_statsUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/wafl_hya_sizer.yaml"},{"location":"ontap-metrics/#flashpool_evict_destage_rate","title":"flashpool_evict_destage_rate","text":"

      Number of blocks destaged per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate evict_destage_rateUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr evict_destage_rateUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_evict_remove_rate","title":"flashpool_evict_remove_rate","text":"

      Number of blocks freed per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate evict_remove_rateUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr evict_remove_rateUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_hya_read_hit_latency_average","title":"flashpool_hya_read_hit_latency_average","text":"

      Average of RAID I/O latency on read hit.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate hya_read_hit_latency_averageUnit: millisecType: averageBase: hya_read_hit_latency_count conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr hya_read_hit_latency_averageUnit: millisecType: averageBase: hya_read_hit_latency_count conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_hya_read_miss_latency_average","title":"flashpool_hya_read_miss_latency_average","text":"

      Average read miss latency.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate hya_read_miss_latency_averageUnit: millisecType: averageBase: hya_read_miss_latency_count conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr hya_read_miss_latency_averageUnit: millisecType: averageBase: hya_read_miss_latency_count conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_hya_write_hdd_latency_average","title":"flashpool_hya_write_hdd_latency_average","text":"

      Average write latency to HDD.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate hya_write_hdd_latency_averageUnit: millisecType: averageBase: hya_write_hdd_latency_count conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr hya_write_hdd_latency_averageUnit: millisecType: averageBase: hya_write_hdd_latency_count conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_hya_write_ssd_latency_average","title":"flashpool_hya_write_ssd_latency_average","text":"

      Average of RAID I/O latency on write to SSD.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate hya_write_ssd_latency_averageUnit: millisecType: averageBase: hya_write_ssd_latency_count conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr hya_write_ssd_latency_averageUnit: millisecType: averageBase: hya_write_ssd_latency_count conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_read_cache_ins_rate","title":"flashpool_read_cache_ins_rate","text":"

      Cache insert rate in blocks per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate read_cache_insert_rateUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr read_cache_ins_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_read_ops_replaced","title":"flashpool_read_ops_replaced","text":"

      Number of HDD read operations replaced by SSD reads per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate read_ops_replacedUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr read_ops_replacedUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_read_ops_replaced_percent","title":"flashpool_read_ops_replaced_percent","text":"

      Percentage of HDD read operations replaced by SSD.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate read_ops_replaced_percentUnit: percentType: percentBase: read_ops_total conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr read_ops_replaced_percentUnit: percentType: percentBase: read_ops_total conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_ssd_available","title":"flashpool_ssd_available","text":"

      Total SSD blocks available.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate ssd_availableUnit: noneType: rawBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr ssd_availableUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_ssd_read_cached","title":"flashpool_ssd_read_cached","text":"

      Total read cached SSD blocks.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate ssd_read_cachedUnit: noneType: rawBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr ssd_read_cachedUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_ssd_total","title":"flashpool_ssd_total","text":"

      Total SSD blocks.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate ssd_totalUnit: noneType: rawBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr ssd_totalUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_ssd_total_used","title":"flashpool_ssd_total_used","text":"

      Total SSD blocks used.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate ssd_total_usedUnit: noneType: rawBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr ssd_total_usedUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_ssd_write_cached","title":"flashpool_ssd_write_cached","text":"

      Total write cached SSD blocks.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate ssd_write_cachedUnit: noneType: rawBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr ssd_write_cachedUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_wc_write_blks_total","title":"flashpool_wc_write_blks_total","text":"

      Number of write-cache blocks written per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate wc_write_blocks_totalUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr wc_write_blks_totalUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_write_blks_replaced","title":"flashpool_write_blks_replaced","text":"

      Number of HDD write blocks replaced by SSD writes per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate write_blocks_replacedUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr write_blks_replacedUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flashpool_write_blks_replaced_percent","title":"flashpool_write_blks_replaced_percent","text":"

      Percentage of blocks overwritten to write-cache among all disk writes.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl_hya_per_aggregate write_blocks_replaced_percentUnit: percentType: averageBase: estimated_write_blocks_total conf/restperf/9.12.0/wafl_hya_per_aggr.yaml ZAPI perf-object-get-instances wafl_hya_per_aggr write_blks_replaced_percentUnit: percentType: averageBase: est_write_blks_total conf/zapiperf/cdot/9.8.0/wafl_hya_per_aggr.yaml"},{"location":"ontap-metrics/#flexcache_blocks_requested_from_client","title":"flexcache_blocks_requested_from_client","text":"

      Total number of blocks requested from client

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume blocks_requested_from_clientUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_blocks_retrieved_from_origin","title":"flexcache_blocks_retrieved_from_origin","text":"

      Total number of blocks retrieved from origin

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume blocks_retrieved_from_originUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_evict_rw_cache_skipped_reason_disconnected","title":"flexcache_evict_rw_cache_skipped_reason_disconnected","text":"

      Total number of read-write cache evict operations skipped because the cache is disconnected.

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume evict_rw_cache_skipped_reason_disconnectedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_evict_skipped_reason_config_noent","title":"flexcache_evict_skipped_reason_config_noent","text":"

      Total number of evict operations skipped because the cache config is not available.

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume evict_skipped_reason_config_noentUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_evict_skipped_reason_disconnected","title":"flexcache_evict_skipped_reason_disconnected","text":"

      Total number of evict operations skipped because the cache is disconnected.

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume evict_skipped_reason_disconnectedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_evict_skipped_reason_offline","title":"flexcache_evict_skipped_reason_offline","text":"

      Total number of evict operations skipped because the cache volume is offline.

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume evict_skipped_reason_offlineUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_invalidate_skipped_reason_config_noent","title":"flexcache_invalidate_skipped_reason_config_noent","text":"

      Total number of invalidate operations skipped because the cache config is not available.

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume invalidate_skipped_reason_config_noentUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_invalidate_skipped_reason_disconnected","title":"flexcache_invalidate_skipped_reason_disconnected","text":"

      Total number of invalidate operations skipped because the cache is disconnected.

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume invalidate_skipped_reason_disconnectedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_invalidate_skipped_reason_offline","title":"flexcache_invalidate_skipped_reason_offline","text":"

      Total number of invalidate operations skipped because the cache volume is offline.

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume invalidate_skipped_reason_offlineUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_miss_percent","title":"flexcache_miss_percent","text":"

      This metric represents the percentage of block requests from a client that resulted in a \"miss\" in the FlexCache. A \"miss\" occurs when the requested data is not found in the cache and has to be retrieved from the origin volume.
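      Given the two counters listed for this metric, the percentage is presumably derived as the share of client-requested blocks that had to be fetched from the origin; this is a hedged reading of the counter pairing, not an official formula:

      $$\text{flexcache\_miss\_percent} \approx \frac{\Delta\,\text{blocks\_retrieved\_from\_origin}}{\Delta\,\text{blocks\_requested\_from\_client}} \times 100$$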

      API Endpoint Metric Template ZAPI flexcache_per_volume blocks_retrieved_from_origin, blocks_requested_from_clientUnit: Type: Base: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_nix_retry_skipped_reason_initiator_retrieve","title":"flexcache_nix_retry_skipped_reason_initiator_retrieve","text":"

      Total number of retry nix operations skipped because the initiator is a retrieve operation.

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume nix_retry_skipped_reason_initiator_retrieveUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_nix_skipped_reason_config_noent","title":"flexcache_nix_skipped_reason_config_noent","text":"

      Total number of nix operations skipped because the cache config is not available.

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume nix_skipped_reason_config_noentUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_nix_skipped_reason_disconnected","title":"flexcache_nix_skipped_reason_disconnected","text":"

      Total number of nix operations skipped because the cache is disconnected.

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume nix_skipped_reason_disconnectedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_nix_skipped_reason_in_progress","title":"flexcache_nix_skipped_reason_in_progress","text":"

      Total nix operations skipped because of an in-progress nix.

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume nix_skipped_reason_in_progressUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_nix_skipped_reason_offline","title":"flexcache_nix_skipped_reason_offline","text":"

      Total number of nix operations skipped because the cache volume is offline.

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume nix_skipped_reason_offlineUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_reconciled_data_entries","title":"flexcache_reconciled_data_entries","text":"

      Total number of reconciled data entries at cache side.

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume reconciled_data_entriesUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_reconciled_lock_entries","title":"flexcache_reconciled_lock_entries","text":"

      Total number of reconciled lock entries at cache side.

      API Endpoint Metric Template ZAPI perf-object-get-instances flexcache_per_volume reconciled_lock_entriesUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#flexcache_size","title":"flexcache_size","text":"

      Physical size of the FlexCache. The recommended size for a FlexCache is 10% of the origin volume. The minimum FlexCache constituent size is 1GB.
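      For example, applying the 10% guideline to a 2 TiB origin volume works out to roughly a 200 GiB FlexCache, comfortably above the 1 GB minimum constituent size.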

      API Endpoint Metric Template REST api/storage/flexcache/flexcaches size conf/rest/9.12.0/flexcache.yaml ZAPI flexcache-get-iter flexcache-info.size conf/zapi/cdot/9.8.0/flexcache.yaml"},{"location":"ontap-metrics/#headroom_aggr_current_latency","title":"headroom_aggr_current_latency","text":"

      This is the storage aggregate average latency per message at the disk level.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate current_latencyUnit: microsecType: averageBase: current_ops conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr current_latencyUnit: microsecType: averageBase: current_ops conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_current_ops","title":"headroom_aggr_current_ops","text":"

      Total number of I/Os processed by the aggregate per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate current_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr current_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_current_utilization","title":"headroom_aggr_current_utilization","text":"

      This is the storage aggregate average utilization of all the data disks in the aggregate.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate current_utilizationUnit: percentType: percentBase: current_utilization_denominator conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr current_utilizationUnit: percentType: percentBase: current_utilization_total conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_ewma_daily","title":"headroom_aggr_ewma_daily","text":"

      Daily exponential weighted moving average.
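
      The ewma_* counters report exponentially weighted moving averages over different windows. A small illustrative sketch of the recurrence follows; the smoothing factor used internally by ONTAP is not exposed, so the value below is only a placeholder:

      ```python
      # Illustrative EWMA recurrence; alpha is a placeholder, not ONTAP's value.
      def ewma(samples, alpha=0.1):
          avg = None
          for x in samples:
              avg = x if avg is None else alpha * x + (1 - alpha) * avg
          return avg

      print(ewma([100, 120, 80, 110]))  # ~100.8; recent samples weigh more than older ones
      ```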

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate ewma.dailyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr ewma_dailyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_ewma_hourly","title":"headroom_aggr_ewma_hourly","text":"

      Hourly exponential weighted moving average.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate ewma.hourlyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr ewma_hourlyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_ewma_monthly","title":"headroom_aggr_ewma_monthly","text":"

      Monthly exponential weighted moving average.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate ewma.monthlyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr ewma_monthlyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_ewma_weekly","title":"headroom_aggr_ewma_weekly","text":"

      Weekly exponential weighted moving average.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate ewma.weeklyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr ewma_weeklyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_optimal_point_confidence_factor","title":"headroom_aggr_optimal_point_confidence_factor","text":"

      The confidence factor for the optimal point value based on the observed resource latency and utilization.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate optimal_point.confidence_factorUnit: noneType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr optimal_point_confidence_factorUnit: noneType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_optimal_point_latency","title":"headroom_aggr_optimal_point_latency","text":"

      The latency component of the optimal point of the latency/utilization curve.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate optimal_point.latencyUnit: microsecType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr optimal_point_latencyUnit: microsecType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_optimal_point_ops","title":"headroom_aggr_optimal_point_ops","text":"

      The ops component of the optimal point derived from the latency/utilization curve.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate optimal_point.opsUnit: per_secType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr optimal_point_opsUnit: per_secType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_aggr_optimal_point_utilization","title":"headroom_aggr_optimal_point_utilization","text":"

      The utilization component of the optimal point of the latency/utilization curve.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_aggregate optimal_point.utilizationUnit: noneType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_aggr.yaml ZAPI perf-object-get-instances resource_headroom_aggr optimal_point_utilizationUnit: noneType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_aggr.yaml"},{"location":"ontap-metrics/#headroom_cpu_current_latency","title":"headroom_cpu_current_latency","text":"

      Current operation latency of the resource.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu current_latencyUnit: microsecType: averageBase: current_ops conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu current_latencyUnit: microsecType: averageBase: current_ops conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_current_ops","title":"headroom_cpu_current_ops","text":"

      Total number of operations per second (also referred to as dblade ops).

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu current_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu current_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_current_utilization","title":"headroom_cpu_current_utilization","text":"

      Average processor utilization across all processors in the system.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu current_utilizationUnit: percentType: percentBase: elapsed_time conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu current_utilizationUnit: percentType: percentBase: current_utilization_total conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_ewma_daily","title":"headroom_cpu_ewma_daily","text":"

      Daily exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu ewma.dailyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu ewma_dailyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_ewma_hourly","title":"headroom_cpu_ewma_hourly","text":"

      Hourly exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu ewma.hourlyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu ewma_hourlyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_ewma_monthly","title":"headroom_cpu_ewma_monthly","text":"

      Monthly exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu ewma.monthlyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu ewma_monthlyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_ewma_weekly","title":"headroom_cpu_ewma_weekly","text":"

      Weekly exponential weighted moving average for current_ops, optimal_point_ops, current_latency, optimal_point_latency, current_utilization, optimal_point_utilization and optimal_point_confidence_factor.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu ewma.weeklyUnit: noneType: rawBase: conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu ewma_weeklyUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_optimal_point_confidence_factor","title":"headroom_cpu_optimal_point_confidence_factor","text":"

      Confidence factor for the optimal point value based on the observed resource latency and utilization. The possible values are: 0 - unknown, 1 - low, 2 - medium, 3 - high. This counter can provide an average confidence factor over a range of time.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu optimal_point.confidence_factorUnit: noneType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu optimal_point_confidence_factorUnit: noneType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_optimal_point_latency","title":"headroom_cpu_optimal_point_latency","text":"

      Latency component of the optimal point of the latency/utilization curve. This counter can provide an average latency over a range of time.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu optimal_point.latencyUnit: microsecType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu optimal_point_latencyUnit: microsecType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_optimal_point_ops","title":"headroom_cpu_optimal_point_ops","text":"

      Ops component of the optimal point derived from the latency/utilization curve. This counter can provide an average ops over a range of time.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu optimal_point.opsUnit: per_secType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu optimal_point_opsUnit: per_secType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#headroom_cpu_optimal_point_utilization","title":"headroom_cpu_optimal_point_utilization","text":"

      Utilization component of the optimal point of the latency/utilization curve. This counter can provide an average utilization over a range of time.

      API Endpoint Metric Template REST api/cluster/counter/tables/headroom_cpu optimal_point.utilizationUnit: noneType: averageBase: optimal_point.samples conf/restperf/9.12.0/resource_headroom_cpu.yaml ZAPI perf-object-get-instances resource_headroom_cpu optimal_point_utilizationUnit: noneType: averageBase: optimal_point_samples conf/zapiperf/cdot/9.8.0/resource_headroom_cpu.yaml"},{"location":"ontap-metrics/#hostadapter_bytes_read","title":"hostadapter_bytes_read","text":"

      Bytes read through a host adapter

      API Endpoint Metric Template REST api/cluster/counter/tables/host_adapter bytes_readUnit: per_secType: rateBase: conf/restperf/9.12.0/hostadapter.yaml ZAPI perf-object-get-instances hostadapter bytes_readUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/hostadapter.yaml"},{"location":"ontap-metrics/#hostadapter_bytes_written","title":"hostadapter_bytes_written","text":"

      Bytes written through a host adapter

      API Endpoint Metric Template REST api/cluster/counter/tables/host_adapter bytes_writtenUnit: per_secType: rateBase: conf/restperf/9.12.0/hostadapter.yaml ZAPI perf-object-get-instances hostadapter bytes_writtenUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/hostadapter.yaml"},{"location":"ontap-metrics/#iscsi_lif_avg_latency","title":"iscsi_lif_avg_latency","text":"

      Average latency for iSCSI operations

      API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif average_latencyUnit: microsecType: averageBase: cmd_transferred conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif avg_latencyUnit: microsecType: averageBase: cmd_transfered conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_avg_other_latency","title":"iscsi_lif_avg_other_latency","text":"

      Average latency for operations other than read and write (for example, Inquiry, Report LUNs, SCSI Task Management Functions)

      API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif average_other_latencyUnit: microsecType: averageBase: iscsi_other_ops conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif avg_other_latencyUnit: microsecType: averageBase: iscsi_other_ops conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_avg_read_latency","title":"iscsi_lif_avg_read_latency","text":"

      Average latency for read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif average_read_latencyUnit: microsecType: averageBase: iscsi_read_ops conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif avg_read_latencyUnit: microsecType: averageBase: iscsi_read_ops conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_avg_write_latency","title":"iscsi_lif_avg_write_latency","text":"

      Average latency for write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif average_write_latencyUnit: microsecType: averageBase: iscsi_write_ops conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif avg_write_latencyUnit: microsecType: averageBase: iscsi_write_ops conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_cmd_transfered","title":"iscsi_lif_cmd_transfered","text":"

      Commands transferred by this iSCSI connection

      API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif cmd_transferredUnit: noneType: rateBase: conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif cmd_transferedUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_iscsi_other_ops","title":"iscsi_lif_iscsi_other_ops","text":"

      iSCSI other operations per second on this logical interface (LIF)

      API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif iscsi_other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif iscsi_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_iscsi_read_ops","title":"iscsi_lif_iscsi_read_ops","text":"

      iSCSI read operations per second on this logical interface (LIF)

      API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif iscsi_read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif iscsi_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_iscsi_write_ops","title":"iscsi_lif_iscsi_write_ops","text":"

      iSCSI write operations per second on this logical interface (LIF)

      API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif iscsi_write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif iscsi_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_protocol_errors","title":"iscsi_lif_protocol_errors","text":"

      Number of protocol errors from iSCSI sessions on this logical interface (LIF)

      API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif protocol_errorsUnit: noneType: deltaBase: conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif protocol_errorsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_read_data","title":"iscsi_lif_read_data","text":"

      Amount of data read from the storage system in bytes

      API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iscsi_lif_write_data","title":"iscsi_lif_write_data","text":"

      Amount of data written to the storage system in bytes

      API Endpoint Metric Template REST api/cluster/counter/tables/iscsi_lif write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/iscsi_lif.yaml ZAPI perf-object-get-instances iscsi_lif write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/iscsi_lif.yaml"},{"location":"ontap-metrics/#iw_avg_latency","title":"iw_avg_latency","text":"

      Average RDMA I/O latency.

      API Endpoint Metric Template REST api/cluster/counter/tables/iwarp average_latencyUnit: microsecType: averageBase: ops conf/restperf/9.14.1/iwarp.yaml ZAPI perf-object-get-instances iwarp iw_avg_latencyUnit: microsecType: averageBase: iw_ops conf/zapiperf/cdot/9.8.0/iwarp.yaml"},{"location":"ontap-metrics/#iw_ops","title":"iw_ops","text":"

      Number of RDMA I/Os issued.

      API Endpoint Metric Template REST api/cluster/counter/tables/iwarp opsUnit: noneType: rateBase: conf/restperf/9.14.1/iwarp.yaml ZAPI perf-object-get-instances iwarp iw_opsUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/iwarp.yaml"},{"location":"ontap-metrics/#iw_read_ops","title":"iw_read_ops","text":"

      Number of RDMA read I/Os issued.

      API Endpoint Metric Template REST api/cluster/counter/tables/iwarp read_opsUnit: noneType: rateBase: conf/restperf/9.14.1/iwarp.yaml ZAPI perf-object-get-instances iwarp iw_read_opsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/iwarp.yaml"},{"location":"ontap-metrics/#iw_write_ops","title":"iw_write_ops","text":"

      Number of RDMA write I/Os issued.

      API Endpoint Metric Template REST api/cluster/counter/tables/iwarp write_opsUnit: noneType: rateBase: conf/restperf/9.14.1/iwarp.yaml ZAPI perf-object-get-instances iwarp iw_write_opsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/iwarp.yaml"},{"location":"ontap-metrics/#lif_recv_data","title":"lif_recv_data","text":"

      Number of bytes received per second

      API Endpoint Metric Template REST api/cluster/counter/tables/lif received_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/lif.yaml ZAPI perf-object-get-instances lif recv_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lif.yaml"},{"location":"ontap-metrics/#lif_recv_errors","title":"lif_recv_errors","text":"

      Number of received errors per second

      API Endpoint Metric Template REST api/cluster/counter/tables/lif received_errorsUnit: per_secType: rateBase: conf/restperf/9.12.0/lif.yaml ZAPI perf-object-get-instances lif recv_errorsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lif.yaml"},{"location":"ontap-metrics/#lif_recv_packet","title":"lif_recv_packet","text":"

      Number of packets received per second

      API Endpoint Metric Template REST api/cluster/counter/tables/lif received_packetsUnit: per_secType: rateBase: conf/restperf/9.12.0/lif.yaml ZAPI perf-object-get-instances lif recv_packetUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lif.yaml"},{"location":"ontap-metrics/#lif_sent_data","title":"lif_sent_data","text":"

      Number of bytes sent per second

      API Endpoint Metric Template REST api/cluster/counter/tables/lif sent_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/lif.yaml ZAPI perf-object-get-instances lif sent_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lif.yaml"},{"location":"ontap-metrics/#lif_sent_errors","title":"lif_sent_errors","text":"

      Number of sent errors per second

      API Endpoint Metric Template REST api/cluster/counter/tables/lif sent_errorsUnit: per_secType: rateBase: conf/restperf/9.12.0/lif.yaml ZAPI perf-object-get-instances lif sent_errorsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lif.yaml"},{"location":"ontap-metrics/#lif_sent_packet","title":"lif_sent_packet","text":"

      Number of packets sent per second

      API Endpoint Metric Template REST api/cluster/counter/tables/lif sent_packetsUnit: per_secType: rateBase: conf/restperf/9.12.0/lif.yaml ZAPI perf-object-get-instances lif sent_packetUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lif.yaml"},{"location":"ontap-metrics/#lun_avg_read_latency","title":"lun_avg_read_latency","text":"

      Average read latency in microseconds for all operations on the LUN

      API Endpoint Metric Template REST api/cluster/counter/tables/lun average_read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun avg_read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_avg_write_latency","title":"lun_avg_write_latency","text":"

      Average write latency in microseconds for all operations on the LUN

      API Endpoint Metric Template REST api/cluster/counter/tables/lun average_write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun avg_write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_avg_xcopy_latency","title":"lun_avg_xcopy_latency","text":"

      Average latency in microseconds for xcopy requests

      API Endpoint Metric Template REST api/cluster/counter/tables/lun average_xcopy_latencyUnit: microsecType: averageBase: xcopy_requests conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun avg_xcopy_latencyUnit: microsecType: averageBase: xcopy_reqs conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_caw_reqs","title":"lun_caw_reqs","text":"

      Number of compare and write requests

      API Endpoint Metric Template REST api/cluster/counter/tables/lun caw_requestsUnit: noneType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun caw_reqsUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_enospc","title":"lun_enospc","text":"

      Number of operations receiving ENOSPC errors

      API Endpoint Metric Template REST api/cluster/counter/tables/lun enospcUnit: noneType: deltaBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun enospcUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_queue_full","title":"lun_queue_full","text":"

      Queue full responses

      API Endpoint Metric Template REST api/cluster/counter/tables/lun queue_fullUnit: per_secType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun queue_fullUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_read_align_histo","title":"lun_read_align_histo","text":"

      Histogram of WAFL read alignment (number of sectors off WAFL block start)

      API Endpoint Metric Template REST api/cluster/counter/tables/lun read_align_histogramUnit: percentType: percentBase: read_ops_sent conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun read_align_histoUnit: percentType: percentBase: read_ops_sent conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_read_data","title":"lun_read_data","text":"

      Read bytes

      API Endpoint Metric Template REST api/cluster/counter/tables/lun read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_read_ops","title":"lun_read_ops","text":"

      Number of read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/lun read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_read_partial_blocks","title":"lun_read_partial_blocks","text":"

      Percentage of reads whose size is not a multiple of WAFL block size

      API Endpoint Metric Template REST api/cluster/counter/tables/lun read_partial_blocksUnit: percentType: percentBase: read_ops conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun read_partial_blocksUnit: percentType: percentBase: read_ops conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_remote_bytes","title":"lun_remote_bytes","text":"

      I/O to or from a LUN which is not owned by the storage system handling the I/O.

      API Endpoint Metric Template REST api/cluster/counter/tables/lun remote_bytesUnit: b_per_secType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun remote_bytesUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_remote_ops","title":"lun_remote_ops","text":"

      Number of operations received by a storage system that does not own the LUN targeted by the operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/lun remote_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun remote_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_size","title":"lun_size","text":"

      The total provisioned size of the LUN. The LUN size can be increased but not made smaller using the REST interface. The maximum and minimum sizes listed here are the absolute maximum and absolute minimum sizes in bytes. The actual minimum and maximum sizes vary depending on the ONTAP version, ONTAP platform, and the available space in the containing volume and aggregate. For more information, see Size properties in the docs section of the ONTAP REST API documentation.

      API Endpoint Metric Template REST api/storage/luns space.size conf/rest/9.12.0/lun.yaml ZAPI lun-get-iter lun-info.size conf/zapi/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_size_used","title":"lun_size_used","text":"

      The amount of space consumed by the main data stream of the LUN. This value is the total space consumed in the volume by the LUN, including filesystem overhead, but excluding prefix and suffix streams. Due to internal filesystem overhead and the many ways SAN filesystems and applications utilize blocks within a LUN, this value does not necessarily reflect actual consumption/availability from the perspective of the filesystem or application. Without specific knowledge of how the LUN blocks are utilized outside of ONTAP, this property should not be used as an indicator for an out-of-space condition. For more information, see Size properties in the docs section of the ONTAP REST API documentation.

      API Endpoint Metric Template REST api/storage/luns space.used conf/rest/9.12.0/lun.yaml ZAPI lun-get-iter lun-info.size-used conf/zapi/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_size_used_percent","title":"lun_size_used_percent","text":"

      This metric represents the percentage of a LUN that is currently being used.
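
      A minimal sketch of the assumed calculation from the size_used and size values listed below:

      ```python
      # Hypothetical helper mirroring the assumed size_used / size calculation.
      def lun_size_used_percent(size_used: int, size: int) -> float:
          return 100.0 * size_used / size if size else 0.0

      print(lun_size_used_percent(322_122_547_200, 1_073_741_824_000))  # 30.0
      ```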

      API Endpoint Metric Template REST api/storage/luns size_used, size conf/rest/9.12.0/lun.yaml ZAPI lun-get-iter size_used, size conf/zapi/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_unmap_reqs","title":"lun_unmap_reqs","text":"

      Number of unmap command requests

      API Endpoint Metric Template REST api/cluster/counter/tables/lun unmap_requestsUnit: noneType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun unmap_reqsUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_write_align_histo","title":"lun_write_align_histo","text":"

      Histogram of WAFL write alignment (number of sectors off WAFL block start)

      API Endpoint Metric Template REST api/cluster/counter/tables/lun write_align_histogramUnit: percentType: percentBase: write_ops_sent conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun write_align_histoUnit: percentType: percentBase: write_ops_sent conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_write_data","title":"lun_write_data","text":"

      Write bytes

      API Endpoint Metric Template REST api/cluster/counter/tables/lun write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_write_ops","title":"lun_write_ops","text":"

      Number of write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/lun write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_write_partial_blocks","title":"lun_write_partial_blocks","text":"

      Percentage of writes whose size is not a multiple of WAFL block size

      API Endpoint Metric Template REST api/cluster/counter/tables/lun write_partial_blocksUnit: percentType: percentBase: write_ops conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun write_partial_blocksUnit: percentType: percentBase: write_ops conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_writesame_reqs","title":"lun_writesame_reqs","text":"

      Number of write same command requests

      API Endpoint Metric Template REST api/cluster/counter/tables/lun writesame_requestsUnit: noneType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun writesame_reqsUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_writesame_unmap_reqs","title":"lun_writesame_unmap_reqs","text":"

      Number of write same command requests with the unmap bit set

      API Endpoint Metric Template REST api/cluster/counter/tables/lun writesame_unmap_requestsUnit: noneType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun writesame_unmap_reqsUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#lun_xcopy_reqs","title":"lun_xcopy_reqs","text":"

      Total number of xcopy operations on the LUN

      API Endpoint Metric Template REST api/cluster/counter/tables/lun xcopy_requestsUnit: noneType: rateBase: conf/restperf/9.12.0/lun.yaml ZAPI perf-object-get-instances lun xcopy_reqsUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/lun.yaml"},{"location":"ontap-metrics/#metadata_collector_api_time","title":"metadata_collector_api_time","text":"

      amount of time to collect data from monitored cluster object

      API Endpoint Metric Template REST NA Harvest generatedUnit: microseconds NA ZAPI NA Harvest generatedUnit: microseconds NA"},{"location":"ontap-metrics/#metadata_collector_calc_time","title":"metadata_collector_calc_time","text":"

      amount of time it took to compute metrics between two successive polls, specifically using properties like raw, delta, rate, average, and percent. This metric is available for ZapiPerf/RestPerf collectors.
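
      An illustrative sketch (not Harvest's internal code) of the delta, rate, and average properties mentioned above, computed from two successive polls of raw counters:

      ```python
      # Two successive polls of a raw counter and its base counter.
      def delta(curr: float, prev: float) -> float:
          return curr - prev

      def rate(curr: float, prev: float, interval_s: float) -> float:
          return delta(curr, prev) / interval_s

      def average(curr: float, prev: float, base_curr: float, base_prev: float) -> float:
          # e.g. latency counters: delta(latency) / delta(ops), guarding against a zero base
          d_base = delta(base_curr, base_prev)
          return delta(curr, prev) / d_base if d_base else 0.0

      # 60s poll interval: 6,000 new ops and 120,000 us of accumulated latency
      print(rate(106_000, 100_000, 60))                   # 100.0 ops/s
      print(average(620_000, 500_000, 106_000, 100_000))  # 20.0 us per op
      ```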

      API Endpoint Metric Template REST NA Harvest generatedUnit: microseconds NA ZAPI NA Harvest generatedUnit: microseconds NA"},{"location":"ontap-metrics/#metadata_collector_instances","title":"metadata_collector_instances","text":"

      number of objects collected from monitored cluster

      API Endpoint Metric Template REST NA Harvest generatedUnit: scalar NA ZAPI NA Harvest generatedUnit: scalar NA"},{"location":"ontap-metrics/#metadata_collector_metrics","title":"metadata_collector_metrics","text":"

      number of counters collected from monitored cluster

      API Endpoint Metric Template REST NA Harvest generatedUnit: scalar NA ZAPI NA Harvest generatedUnit: scalar NA"},{"location":"ontap-metrics/#metadata_collector_parse_time","title":"metadata_collector_parse_time","text":"

      amount of time to parse XML, JSON, etc. for cluster object

      API Endpoint Metric Template REST NA Harvest generatedUnit: microseconds NA ZAPI NA Harvest generatedUnit: microseconds NA"},{"location":"ontap-metrics/#metadata_collector_plugin_time","title":"metadata_collector_plugin_time","text":"

      amount of time for all plugins to post-process metrics

      API Endpoint Metric Template REST NA Harvest generatedUnit: microseconds NA ZAPI NA Harvest generatedUnit: microseconds NA"},{"location":"ontap-metrics/#metadata_collector_poll_time","title":"metadata_collector_poll_time","text":"

      amount of time it took for the poll to finish

      API Endpoint Metric Template REST NA Harvest generatedUnit: microseconds NA ZAPI NA Harvest generatedUnit: microseconds NA"},{"location":"ontap-metrics/#metadata_collector_skips","title":"metadata_collector_skips","text":"

      number of metrics that were not calculated between two successive polls. This metric is available for ZapiPerf/RestPerf collectors.

      API Endpoint Metric Template REST NA Harvest generatedUnit: scalar NA ZAPI NA Harvest generatedUnit: scalar NA"},{"location":"ontap-metrics/#metadata_collector_task_time","title":"metadata_collector_task_time","text":"

      amount of time it took for each collector's subtasks to complete

      API Endpoint Metric Template REST NA Harvest generatedUnit: microseconds NA ZAPI NA Harvest generatedUnit: microseconds NA"},{"location":"ontap-metrics/#metadata_component_count","title":"metadata_component_count","text":"

      number of metrics collected for each object

      API Endpoint Metric Template REST NA Harvest generatedUnit: scalar NA ZAPI NA Harvest generatedUnit: scalar NA"},{"location":"ontap-metrics/#metadata_component_status","title":"metadata_component_status","text":"

      status of the collector - 0 means running, 1 means standby, 2 means failed

      API Endpoint Metric Template REST NA Harvest generatedUnit: enum NA ZAPI NA Harvest generatedUnit: enum NA"},{"location":"ontap-metrics/#metadata_exporter_count","title":"metadata_exporter_count","text":"

      number of metrics and labels exported

      API Endpoint Metric Template REST NA Harvest generatedUnit: scalar NA ZAPI NA Harvest generatedUnit: scalar NA"},{"location":"ontap-metrics/#metadata_exporter_time","title":"metadata_exporter_time","text":"

      amount of time it took to render, export, and serve exported data

      API Endpoint Metric Template REST NA Harvest generatedUnit: microseconds NA ZAPI NA Harvest generatedUnit: microseconds NA"},{"location":"ontap-metrics/#metadata_target_goroutines","title":"metadata_target_goroutines","text":"

      number of goroutines that exist within the poller

      API Endpoint Metric Template REST NA Harvest generatedUnit: scalar NA ZAPI NA Harvest generatedUnit: scalar NA"},{"location":"ontap-metrics/#metadata_target_status","title":"metadata_target_status","text":"

      status of the system being monitored. 0 means reachable, 1 means unreachable

      API Endpoint Metric Template REST NA Harvest generatedUnit: enum NA ZAPI NA Harvest generatedUnit: enum NA"},{"location":"ontap-metrics/#metrocluster_check_aggr_status","title":"metrocluster_check_aggr_status","text":"

      The type of diagnostic operation run for the Aggregate, along with the result of that operation.

      API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/metrocluster_check.yaml"},{"location":"ontap-metrics/#metrocluster_check_cluster_status","title":"metrocluster_check_cluster_status","text":"

      The type of diagnostic operation run for the Cluster, along with the result of that operation.

      API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/metrocluster_check.yaml"},{"location":"ontap-metrics/#metrocluster_check_node_status","title":"metrocluster_check_node_status","text":"

      The type of diagnostic operation run for the Node, along with the result of that operation.

      API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/metrocluster_check.yaml"},{"location":"ontap-metrics/#metrocluster_check_volume_status","title":"metrocluster_check_volume_status","text":"

      The type of diagnostic operation run for the Volume, along with the result of that operation.

      API Endpoint Metric Template REST NA Harvest generated conf/rest/9.12.0/metrocluster_check.yaml"},{"location":"ontap-metrics/#namespace_avg_other_latency","title":"namespace_avg_other_latency","text":"

      Average other ops latency in microseconds for all operations on the Namespace

      API Endpoint Metric Template REST api/cluster/counter/tables/namespace average_other_latencyUnit: microsecType: averageBase: other_ops conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace avg_other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_avg_read_latency","title":"namespace_avg_read_latency","text":"

      Average read latency in microseconds for all operations on the Namespace

      API Endpoint Metric Template REST api/cluster/counter/tables/namespace average_read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace avg_read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_avg_write_latency","title":"namespace_avg_write_latency","text":"

      Average write latency in microseconds for all operations on the Namespace

      API Endpoint Metric Template REST api/cluster/counter/tables/namespace average_write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace avg_write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_block_size","title":"namespace_block_size","text":"

      The size of blocks in the namespace in bytes. Valid in POST when creating an NVMe namespace that is not a clone of another. Disallowed in POST when creating a namespace clone.

      API Endpoint Metric Template REST api/storage/namespaces space.block_size conf/rest/9.12.0/namespace.yaml ZAPI nvme-namespace-get-iter nvme-namespace-info.block-size conf/zapi/cdot/9.8.0/namespace.yaml"},{"location":"ontap-metrics/#namespace_other_ops","title":"namespace_other_ops","text":"

      Number of other operations

      API Endpoint Metric Template REST api/cluster/counter/tables/namespace other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_read_data","title":"namespace_read_data","text":"

      Read bytes

      API Endpoint Metric Template REST api/cluster/counter/tables/namespace read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_read_ops","title":"namespace_read_ops","text":"

      Number of read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/namespace read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_remote_bytes","title":"namespace_remote_bytes","text":"

      Remote read bytes

      API Endpoint Metric Template REST api/cluster/counter/tables/namespace remote.read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace remote_bytesUnit: Type: Base: conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_remote_ops","title":"namespace_remote_ops","text":"

      Number of remote read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/namespace remote.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace remote_opsUnit: Type: Base: conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_size","title":"namespace_size","text":"

      The total provisioned size of the NVMe namespace. Valid in POST and PATCH. The NVMe namespace size can be increased but not made smaller using the REST interface. The maximum and minimum sizes listed here are the absolute maximum and absolute minimum sizes in bytes. The maximum size is variable with respect to large NVMe namespace support in ONTAP. If large namespaces are supported, the maximum size is 128 TB (140737488355328 bytes); if not supported, the maximum size is just under 16 TB (17557557870592 bytes). The minimum size supported is always 4096 bytes. For more information, see Size properties in the docs section of the ONTAP REST API documentation.

      API Endpoint Metric Template REST api/storage/namespaces space.size conf/rest/9.12.0/namespace.yaml ZAPI nvme-namespace-get-iter nvme-namespace-info.size conf/zapi/cdot/9.8.0/namespace.yaml"},{"location":"ontap-metrics/#namespace_size_available","title":"namespace_size_available","text":"

      This metric represents the amount of available space in a namespace.

      API Endpoint Metric Template REST api/storage/namespaces size, size_used conf/rest/9.12.0/namespace.yaml ZAPI nvme-namespace-get-iter size, size_used conf/zapi/cdot/9.8.0/namespace.yaml"},{"location":"ontap-metrics/#namespace_size_available_percent","title":"namespace_size_available_percent","text":"

      This metric represents the percentage of available space in a namespace.

      API Endpoint Metric Template REST api/storage/namespaces size_available, size conf/rest/9.12.0/namespace.yaml ZAPI nvme-namespace-get-iter size_available, size conf/zapi/cdot/9.8.0/namespace.yaml"},{"location":"ontap-metrics/#namespace_size_used","title":"namespace_size_used","text":"

      The amount of space consumed by the main data stream of the NVMe namespace. This value is the total space consumed in the volume by the NVMe namespace, including filesystem overhead, but excluding prefix and suffix streams. Due to internal filesystem overhead and the many ways NVMe filesystems and applications utilize blocks within a namespace, this value does not necessarily reflect actual consumption/availability from the perspective of the filesystem or application. Without specific knowledge of how the namespace blocks are utilized outside of ONTAP, this property should not be used as an indicator for an out-of-space condition. For more information, see Size properties in the docs section of the ONTAP REST API documentation.

      API Endpoint Metric Template REST api/storage/namespaces space.used conf/rest/9.12.0/namespace.yaml ZAPI nvme-namespace-get-iter nvme-namespace-info.size-used conf/zapi/cdot/9.8.0/namespace.yaml"},{"location":"ontap-metrics/#namespace_write_data","title":"namespace_write_data","text":"

      Write bytes

      API Endpoint Metric Template REST api/cluster/counter/tables/namespace write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#namespace_write_ops","title":"namespace_write_ops","text":"

      Number of write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/namespace write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/namespace.yaml ZAPI perf-object-get-instances namespace write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/namespace.yaml"},{"location":"ontap-metrics/#ndmp_session_data_bytes_processed","title":"ndmp_session_data_bytes_processed","text":"

      Indicates the NDMP data bytes processed.

      API Endpoint Metric Template REST api/protocols/ndmp/sessions data.bytes_processed conf/rest/9.7.0/ndmp_session.yaml"},{"location":"ontap-metrics/#ndmp_session_mover_bytes_moved","title":"ndmp_session_mover_bytes_moved","text":"

      Indicates the NDMP mover bytes moved.

      API Endpoint Metric Template REST api/protocols/ndmp/sessions mover.bytes_moved conf/rest/9.7.0/ndmp_session.yaml"},{"location":"ontap-metrics/#net_port_mtu","title":"net_port_mtu","text":"

      Maximum transmission unit, largest packet size on this network

      API Endpoint Metric Template REST api/network/ethernet/ports mtu conf/rest/9.12.0/netport.yaml ZAPI net-port-get-iter net-port-info.mtu conf/zapi/cdot/9.8.0/netport.yaml"},{"location":"ontap-metrics/#netstat_bytes_recvd","title":"netstat_bytes_recvd","text":"

      Number of bytes received by a TCP connection

      API Endpoint Metric Template ZAPI perf-object-get-instances netstat bytes_recvdUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#netstat_bytes_sent","title":"netstat_bytes_sent","text":"

      Number of bytes sent by a TCP connection

      API Endpoint Metric Template ZAPI perf-object-get-instances netstat bytes_sentUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#netstat_cong_win","title":"netstat_cong_win","text":"

      Congestion window of a TCP connection

      API Endpoint Metric Template ZAPI perf-object-get-instances netstat cong_winUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#netstat_cong_win_th","title":"netstat_cong_win_th","text":"

      Congestion window threshold of a TCP connection

      API Endpoint Metric Template ZAPI perf-object-get-instances netstat cong_win_thUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#netstat_ooorcv_pkts","title":"netstat_ooorcv_pkts","text":"

      Number of out-of-order packets received by this TCP connection

      API Endpoint Metric Template ZAPI perf-object-get-instances netstat ooorcv_pktsUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#netstat_recv_window","title":"netstat_recv_window","text":"

      Receive window size of a TCP connection

      API Endpoint Metric Template ZAPI perf-object-get-instances netstat recv_windowUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#netstat_rexmit_pkts","title":"netstat_rexmit_pkts","text":"

      Number of packets retransmitted by this TCP connection

      API Endpoint Metric Template ZAPI perf-object-get-instances netstat rexmit_pktsUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#netstat_send_window","title":"netstat_send_window","text":"

      Send window size of a TCP connection

      API Endpoint Metric Template ZAPI perf-object-get-instances netstat send_windowUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/netstat.yaml"},{"location":"ontap-metrics/#nfs_clients_idle_duration","title":"nfs_clients_idle_duration","text":"

      The idle time duration of the connected client, reported in ISO-8601 duration format (hours, minutes, and seconds).
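
      A small sketch for converting such a value into seconds; the sample string is hypothetical and the pattern only covers the day, hour, minute, and second fields:

      ```python
      import re

      # Hypothetical parser for ISO-8601 durations such as "PT2H30M15S".
      _DURATION = re.compile(
          r"P(?:(?P<d>\d+)D)?T(?:(?P<h>\d+)H)?(?:(?P<m>\d+)M)?(?:(?P<s>\d+)S)?"
      )

      def idle_duration_seconds(value: str) -> int:
          m = _DURATION.fullmatch(value)
          if not m:
              raise ValueError(f"unsupported ISO-8601 duration: {value!r}")
          d, h, mi, s = (int(m.group(g) or 0) for g in ("d", "h", "m", "s"))
          return ((d * 24 + h) * 60 + mi) * 60 + s

      print(idle_duration_seconds("PT2H30M15S"))  # 9015
      ```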

      API Endpoint Metric Template REST api/protocols/nfs/connected-clients idle_duration conf/rest/9.7.0/nfs_clients.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_bytelockalloc","title":"nfs_diag_storePool_ByteLockAlloc","text":"

      Current number of byte range lock objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.byte_lock_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_ByteLockAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_bytelockmax","title":"nfs_diag_storePool_ByteLockMax","text":"

      Maximum number of byte range lock objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.byte_lock_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_ByteLockMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_clientalloc","title":"nfs_diag_storePool_ClientAlloc","text":"

      Current number of client objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.client_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_ClientAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_clientmax","title":"nfs_diag_storePool_ClientMax","text":"

      Maximum number of client objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.client_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_ClientMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_connectionparentsessionreferencealloc","title":"nfs_diag_storePool_ConnectionParentSessionReferenceAlloc","text":"

      Current number of connection parent session reference objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.connection_parent_session_reference_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_ConnectionParentSessionReferenceAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_connectionparentsessionreferencemax","title":"nfs_diag_storePool_ConnectionParentSessionReferenceMax","text":"

      Maximum number of connection parent session reference objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.connection_parent_session_reference_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_ConnectionParentSessionReferenceMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_copystatealloc","title":"nfs_diag_storePool_CopyStateAlloc","text":"

      Current number of copy state objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.copy_state_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_CopyStateAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_copystatemax","title":"nfs_diag_storePool_CopyStateMax","text":"

      Maximum number of copy state objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.copy_state_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_CopyStateMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_delegalloc","title":"nfs_diag_storePool_DelegAlloc","text":"

      Current number of delegation lock objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.delegation_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_DelegAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_delegmax","title":"nfs_diag_storePool_DelegMax","text":"

      Maximum number of delegation lock objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.delegation_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_DelegMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_delegstatealloc","title":"nfs_diag_storePool_DelegStateAlloc","text":"

      Current number of delegation state objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.delegation_state_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_DelegStateAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_delegstatemax","title":"nfs_diag_storePool_DelegStateMax","text":"

      Maximum number of delegation state objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.delegation_state_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_DelegStateMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_layoutalloc","title":"nfs_diag_storePool_LayoutAlloc","text":"

      Current number of layout objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.layout_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_LayoutAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_layoutmax","title":"nfs_diag_storePool_LayoutMax","text":"

      Maximum number of layout objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.layout_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_LayoutMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_layoutstatealloc","title":"nfs_diag_storePool_LayoutStateAlloc","text":"

      Current number of layout state objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.layout_state_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_LayoutStateAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_layoutstatemax","title":"nfs_diag_storePool_LayoutStateMax","text":"

      Maximum number of layout state objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.layout_state_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_LayoutStateMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_lockstatealloc","title":"nfs_diag_storePool_LockStateAlloc","text":"

      Current number of lock state objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.lock_state_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_LockStateAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_lockstatemax","title":"nfs_diag_storePool_LockStateMax","text":"

      Maximum number of lock state objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.lock_state_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_LockStateMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_openalloc","title":"nfs_diag_storePool_OpenAlloc","text":"

      Current number of share objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.open_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_OpenAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_openmax","title":"nfs_diag_storePool_OpenMax","text":"

      Maximum number of share lock objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.open_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_OpenMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_openstatealloc","title":"nfs_diag_storePool_OpenStateAlloc","text":"

      Current number of open state objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.openstate_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_OpenStateAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_openstatemax","title":"nfs_diag_storePool_OpenStateMax","text":"

      Maximum number of open state objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.openstate_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_OpenStateMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_owneralloc","title":"nfs_diag_storePool_OwnerAlloc","text":"

      Current number of owner objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.owner_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_OwnerAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_ownermax","title":"nfs_diag_storePool_OwnerMax","text":"

      Maximum number of owner objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.owner_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_OwnerMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_sessionalloc","title":"nfs_diag_storePool_SessionAlloc","text":"

      Current number of session objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.session_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_SessionAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_sessionconnectionholderalloc","title":"nfs_diag_storePool_SessionConnectionHolderAlloc","text":"

      Current number of session connection holder objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.session_connection_holder_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_SessionConnectionHolderAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_sessionconnectionholdermax","title":"nfs_diag_storePool_SessionConnectionHolderMax","text":"

      Maximum number of session connection holder objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.session_connection_holder_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_SessionConnectionHolderMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_sessionholderalloc","title":"nfs_diag_storePool_SessionHolderAlloc","text":"

      Current number of session holder objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.session_holder_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_SessionHolderAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_sessionholdermax","title":"nfs_diag_storePool_SessionHolderMax","text":"

      Maximum number of session holder objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.session_holder_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_SessionHolderMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_sessionmax","title":"nfs_diag_storePool_SessionMax","text":"

      Maximum number of session objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.session_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_SessionMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_staterefhistoryalloc","title":"nfs_diag_storePool_StateRefHistoryAlloc","text":"

      Current number of state reference callstack history objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.state_reference_history_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_StateRefHistoryAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_staterefhistorymax","title":"nfs_diag_storePool_StateRefHistoryMax","text":"

      Maximum number of state reference callstack history objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.state_reference_history_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_StateRefHistoryMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_stringalloc","title":"nfs_diag_storePool_StringAlloc","text":"

      Current number of string objects allocated.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.string_allocatedUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_StringAllocUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nfs_diag_storepool_stringmax","title":"nfs_diag_storePool_StringMax","text":"

      Maximum number of string objects.

      API Endpoint Metric Template REST api/cluster/counter/tables/nfs_v4_diag storepool.string_maximumUnit: noneType: rawBase: conf/restperf/9.12.0/nfsv4_pool.yaml ZAPI perf-object-get-instances nfsv4_diag storePool_StringMaxUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_pool.yaml"},{"location":"ontap-metrics/#nic_link_up_to_downs","title":"nic_link_up_to_downs","text":"

Number of link state changes from UP to DOWN.

      API Endpoint Metric Template REST api/cluster/counter/tables/nic_common link_up_to_downUnit: noneType: deltaBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common link_up_to_downsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_rx_alignment_errors","title":"nic_rx_alignment_errors","text":"

      Alignment errors detected on received packets

      API Endpoint Metric Template REST api/cluster/counter/tables/nic_common receive_alignment_errorsUnit: noneType: deltaBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common rx_alignment_errorsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_rx_bytes","title":"nic_rx_bytes","text":"

      Bytes received

      API Endpoint Metric Template REST api/cluster/counter/tables/nic_common receive_bytesUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common rx_bytesUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_rx_crc_errors","title":"nic_rx_crc_errors","text":"
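nic_rx_bytes is a rate-type counter, and nic_link_up_to_downs above is a delta-type counter. In general, a delta counter exports the raw difference between two successive polls of a monotonically increasing value, and a rate counter divides that difference by the elapsed time. The sketch below illustrates that interpretation under those assumptions; it is not Harvest's actual collection code, and the sample numbers are made up.

```python
# Minimal sketch of how rate- and delta-type counters are typically derived
# from two successive polls of a monotonically increasing raw counter.
# Illustration only; not Harvest's implementation.

def delta(previous: int, current: int) -> int:
    """delta type: raw difference between two polls."""
    return current - previous

def rate(previous: int, current: int, elapsed_seconds: float) -> float:
    """rate type: difference per second over the polling interval."""
    return delta(previous, current) / elapsed_seconds

if __name__ == "__main__":
    # hypothetical raw rx_bytes samples taken 60 seconds apart
    prev_rx, curr_rx = 1_000_000, 7_000_000
    print(f"nic_rx_bytes ~ {rate(prev_rx, curr_rx, 60.0):.0f} B/s")
    # hypothetical link_up_to_downs samples over the same interval
    prev_flaps, curr_flaps = 3, 5
    print(f"nic_link_up_to_downs delta = {delta(prev_flaps, curr_flaps)}")
```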

      CRC errors detected on received packets

      API Endpoint Metric Template REST api/cluster/counter/tables/nic_common receive_crc_errorsUnit: noneType: deltaBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common rx_crc_errorsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_rx_errors","title":"nic_rx_errors","text":"

Errors received

      API Endpoint Metric Template REST api/cluster/counter/tables/nic_common receive_errorsUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common rx_errorsUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_rx_length_errors","title":"nic_rx_length_errors","text":"

      Length errors detected on received packets

      API Endpoint Metric Template REST api/cluster/counter/tables/nic_common receive_length_errorsUnit: noneType: deltaBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common rx_length_errorsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_rx_total_errors","title":"nic_rx_total_errors","text":"

      Total errors received

      API Endpoint Metric Template REST api/cluster/counter/tables/nic_common receive_total_errorsUnit: noneType: deltaBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common rx_total_errorsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_tx_bytes","title":"nic_tx_bytes","text":"

      Bytes sent

      API Endpoint Metric Template REST api/cluster/counter/tables/nic_common transmit_bytesUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common tx_bytesUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_tx_errors","title":"nic_tx_errors","text":"

Errors sent

      API Endpoint Metric Template REST api/cluster/counter/tables/nic_common transmit_errorsUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common tx_errorsUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_tx_hw_errors","title":"nic_tx_hw_errors","text":"

      Transmit errors reported by hardware

      API Endpoint Metric Template REST api/cluster/counter/tables/nic_common transmit_hw_errorsUnit: noneType: deltaBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common tx_hw_errorsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#nic_tx_total_errors","title":"nic_tx_total_errors","text":"

      Total errors sent

      API Endpoint Metric Template REST api/cluster/counter/tables/nic_common transmit_total_errorsUnit: noneType: deltaBase: conf/restperf/9.12.0/nic_common.yaml ZAPI perf-object-get-instances nic_common tx_total_errorsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/nic_common.yaml"},{"location":"ontap-metrics/#node_avg_processor_busy","title":"node_avg_processor_busy","text":"

      Average processor utilization across active processors in the system

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node average_processor_busy_percentUnit: percentType: percentBase: cpu_elapsed_time conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node avg_processor_busyUnit: percentType: percentBase: cpu_elapsed_time conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_cifs_connections","title":"node_cifs_connections","text":"

      Number of connections

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node connectionsUnit: noneType: rawBase: conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node connectionsUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_established_sessions","title":"node_cifs_established_sessions","text":"

      Number of established SMB and SMB2 sessions

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node established_sessionsUnit: noneType: rawBase: conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node established_sessionsUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_latency","title":"node_cifs_latency","text":"

      Average latency for CIFS operations

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node latencyUnit: microsecType: averageBase: latency_base conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node cifs_latencyUnit: microsecType: averageBase: cifs_latency_base conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_op_count","title":"node_cifs_op_count","text":"
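node_cifs_latency is an average-type counter with a Base counter (latency_base for REST, cifs_latency_base for ZAPI). For counters of this shape, the value over a polling interval is typically the delta of the numerator divided by the delta of its base. The sketch below shows that arithmetic under that assumption; the sample figures are hypothetical.

```python
# Minimal sketch of an "average"-type counter with a Base counter, e.g.
# node_cifs_latency (latency / latency_base): the interval average is
# delta(numerator) / delta(base). Sample numbers are made up.

def average_counter(num_prev: int, num_curr: int,
                    base_prev: int, base_curr: int) -> float:
    """Return delta(numerator) / delta(base), or 0.0 if the base did not move."""
    base_delta = base_curr - base_prev
    if base_delta <= 0:
        return 0.0
    return (num_curr - num_prev) / base_delta

if __name__ == "__main__":
    # hypothetical cumulative latency (microsec) and base-count samples
    lat_prev, lat_curr = 50_000_000, 50_900_000
    ops_prev, ops_curr = 10_000, 11_000
    avg = average_counter(lat_prev, lat_curr, ops_prev, ops_curr)
    print(f"node_cifs_latency ~ {avg:.0f} microsec per op")
```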

      Array of select CIFS operation counts

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node op_countUnit: noneType: rateBase: conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node cifs_op_countUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_open_files","title":"node_cifs_open_files","text":"

      Number of open files over SMB and SMB2

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node open_filesUnit: noneType: rawBase: conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node open_filesUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_ops","title":"node_cifs_ops","text":"

      Number of CIFS operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node cifs_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node cifs_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_cifs_read_latency","title":"node_cifs_read_latency","text":"

      Average latency for CIFS read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node average_read_latencyUnit: microsecType: averageBase: total_read_ops conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node cifs_read_latencyUnit: microsecType: averageBase: cifs_read_ops conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_read_ops","title":"node_cifs_read_ops","text":"

      Total number of CIFS read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node total_read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node cifs_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_total_ops","title":"node_cifs_total_ops","text":"

      Total number of CIFS operations

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node cifs_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_write_latency","title":"node_cifs_write_latency","text":"

      Average latency for CIFS write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node average_write_latencyUnit: microsecType: averageBase: total_write_ops conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node cifs_write_latencyUnit: microsecType: averageBase: cifs_write_ops conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cifs_write_ops","title":"node_cifs_write_ops","text":"

      Total number of CIFS write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs:node total_write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/cifs_node.yaml ZAPI perf-object-get-instances cifs:node cifs_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_node.yaml"},{"location":"ontap-metrics/#node_cpu_busy","title":"node_cpu_busy","text":"

System CPU resource utilization. Returns a computed percentage for the default CPU field. It computes a 'CPU usage summary' value that indicates how busy the system is, based upon the most heavily utilized domain. The intent is to determine how much CPU remains available before either a single domain maxes out or all available idle CPU cycles are exhausted, whichever occurs first.

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node cpu_busyUnit: percentType: percentBase: cpu_elapsed_time conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node cpu_busyUnit: percentType: percentBase: cpu_elapsed_time conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_cpu_busytime","title":"node_cpu_busytime","text":"
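The node_cpu_busy description above says the 'CPU usage summary' is driven by the most heavily utilized domain, with cpu_elapsed_time as the base. A minimal sketch of that idea follows; the exact formula is an assumption for illustration (busiest per-domain busy-time delta as a percentage of the elapsed-time delta), the domain names are placeholders, and this is not ONTAP's actual implementation.

```python
# Minimal sketch of the idea behind node_cpu_busy: take the most heavily
# utilized CPU domain over a polling interval and express it as a percentage
# of the cpu_elapsed_time delta. Assumed formula, for illustration only.

def cpu_busy_percent(domain_deltas: dict[str, int], elapsed_delta: int) -> float:
    """Busy percentage driven by the busiest domain relative to elapsed CPU time."""
    if elapsed_delta <= 0 or not domain_deltas:
        return 0.0
    busiest = max(domain_deltas.values())
    return 100.0 * busiest / elapsed_delta

if __name__ == "__main__":
    # hypothetical per-domain busy-time deltas (microsec) over one interval
    domains = {"domain_a": 1_200_000, "domain_b": 2_500_000, "domain_c": 1_900_000}
    elapsed = 10_000_000  # hypothetical cpu_elapsed_time delta (microsec)
    print(f"node_cpu_busy ~ {cpu_busy_percent(domains, elapsed):.1f}%")
```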

      The time (in hundredths of a second) that the CPU has been doing useful work since the last boot

      API Endpoint Metric Template REST api/private/cli/node cpu_busy_time conf/rest/9.12.0/node.yaml ZAPI system-node-get-iter node-details-info.cpu-busytime conf/zapi/cdot/9.8.0/node.yaml"},{"location":"ontap-metrics/#node_cpu_domain_busy","title":"node_cpu_domain_busy","text":"

Array of processor time percentages spent in the various domains

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node domain_busyUnit: percentType: percentBase: cpu_elapsed_time conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node domain_busyUnit: percentType: percentBase: cpu_elapsed_time conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_cpu_elapsed_time","title":"node_cpu_elapsed_time","text":"

      Elapsed time since boot

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node cpu_elapsed_timeUnit: microsecType: deltaBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node cpu_elapsed_timeUnit: noneType: delta,no-displayBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_disk_busy","title":"node_disk_busy","text":"

      The utilization percent of the disk. node_disk_busy is disk_busy aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent disk_busy_percentUnit: percentType: percentBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_busyUnit: percentType: percentBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_capacity","title":"node_disk_capacity","text":"
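Several of the node_disk_* metrics in this group are described as the per-disk metric "aggregated by node", i.e. every disk sample carrying the same node label is folded into one node-level series. The sketch below shows only the grouping mechanics, using a plain sum over hypothetical per-disk throughput samples; the actual aggregation function Harvest applies can differ per metric.

```python
# Minimal sketch of "<metric> aggregated by node": per-disk samples that share
# the same node label are combined into one node-level series. Shown here with
# a plain sum over hypothetical throughput samples; illustration only.
from collections import defaultdict

def aggregate_by_node(samples: list[tuple[str, str, float]]) -> dict[str, float]:
    """samples are (node, disk, value) tuples; return the value summed per node."""
    per_node = defaultdict(float)
    for node, _disk, value in samples:
        per_node[node] += value
    return dict(per_node)

if __name__ == "__main__":
    disk_total_data = [  # hypothetical per-disk b_per_sec samples
        ("node-01", "1.0.0", 120e6),
        ("node-01", "1.0.1", 80e6),
        ("node-02", "1.0.2", 200e6),
    ]
    print(aggregate_by_node(disk_total_data))
```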

      Disk capacity in MB. node_disk_capacity is disk_capacity aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent capacityUnit: mbType: rawBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_capacityUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_cp_read_chain","title":"node_disk_cp_read_chain","text":"

      Average number of blocks transferred in each consistency point read operation during a CP. node_disk_cp_read_chain is disk_cp_read_chain aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_chainUnit: noneType: averageBase: cp_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_chainUnit: noneType: averageBase: cp_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_cp_read_latency","title":"node_disk_cp_read_latency","text":"

      Average latency per block in microseconds for consistency point read operations. node_disk_cp_read_latency is disk_cp_read_latency aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_cp_reads","title":"node_disk_cp_reads","text":"

      Number of disk read operations initiated each second for consistency point processing. node_disk_cp_reads is disk_cp_reads aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_data_read","title":"node_disk_data_read","text":"

      Number of disk kilobytes (KB) read per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node disk_data_readUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node disk_data_readUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_disk_data_written","title":"node_disk_data_written","text":"

      Number of disk kilobytes (KB) written per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node disk_data_writtenUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node disk_data_writtenUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_disk_io_pending","title":"node_disk_io_pending","text":"

      Average number of I/Os issued to the disk for which we have not yet received the response. node_disk_io_pending is disk_io_pending aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_io_queued","title":"node_disk_io_queued","text":"

      Number of I/Os queued to the disk but not yet issued. node_disk_io_queued is disk_io_queued aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_busy","title":"node_disk_max_busy","text":"

      The utilization percent of the disk. node_disk_max_busy is the maximum of disk_busy for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent disk_busy_percentUnit: percentType: percentBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_busyUnit: percentType: percentBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_capacity","title":"node_disk_max_capacity","text":"
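The node_disk_max_* variants, by contrast, keep the single largest per-disk value for each node label rather than combining them. A minimal sketch of that selection follows, again with hypothetical samples.

```python
# Minimal sketch of "the maximum of <metric> for label node": for each node
# label, keep the largest per-disk value instead of combining them.
# Hypothetical samples; illustration only.

def max_by_node(samples: list[tuple[str, str, float]]) -> dict[str, float]:
    """samples are (node, disk, value) tuples; return the per-node maximum."""
    per_node: dict[str, float] = {}
    for node, _disk, value in samples:
        per_node[node] = max(per_node.get(node, float("-inf")), value)
    return per_node

if __name__ == "__main__":
    disk_busy = [  # hypothetical per-disk busy percentages
        ("node-01", "1.0.0", 35.0),
        ("node-01", "1.0.1", 62.5),
        ("node-02", "1.0.2", 18.0),
    ]
    print(max_by_node(disk_busy))  # {'node-01': 62.5, 'node-02': 18.0}
```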

      Disk capacity in MB. node_disk_max_capacity is the maximum of disk_capacity for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent capacityUnit: mbType: rawBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_capacityUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_cp_read_chain","title":"node_disk_max_cp_read_chain","text":"

      Average number of blocks transferred in each consistency point read operation during a CP. node_disk_max_cp_read_chain is the maximum of disk_cp_read_chain for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_chainUnit: noneType: averageBase: cp_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_chainUnit: noneType: averageBase: cp_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_cp_read_latency","title":"node_disk_max_cp_read_latency","text":"

      Average latency per block in microseconds for consistency point read operations. node_disk_max_cp_read_latency is the maximum of disk_cp_read_latency for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_cp_reads","title":"node_disk_max_cp_reads","text":"

      Number of disk read operations initiated each second for consistency point processing. node_disk_max_cp_reads is the maximum of disk_cp_reads for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_io_pending","title":"node_disk_max_io_pending","text":"

      Average number of I/Os issued to the disk for which we have not yet received the response. node_disk_max_io_pending is the maximum of disk_io_pending for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_io_queued","title":"node_disk_max_io_queued","text":"

      Number of I/Os queued to the disk but not yet issued. node_disk_max_io_queued is the maximum of disk_io_queued for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_total_data","title":"node_disk_max_total_data","text":"

      Total throughput for user operations per second. node_disk_max_total_data is the maximum of disk_total_data for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_total_transfers","title":"node_disk_max_total_transfers","text":"

      Total number of disk operations involving data transfer initiated per second. node_disk_max_total_transfers is the maximum of disk_total_transfers for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_transfer_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_transfersUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_read_blocks","title":"node_disk_max_user_read_blocks","text":"

      Number of blocks transferred for user read operations per second. node_disk_max_user_read_blocks is the maximum of disk_user_read_blocks for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_read_chain","title":"node_disk_max_user_read_chain","text":"

      Average number of blocks transferred in each user read operation. node_disk_max_user_read_chain is the maximum of disk_user_read_chain for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_chainUnit: noneType: averageBase: user_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_chainUnit: noneType: averageBase: user_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_read_latency","title":"node_disk_max_user_read_latency","text":"

      Average latency per block in microseconds for user read operations. node_disk_max_user_read_latency is the maximum of disk_user_read_latency for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_reads","title":"node_disk_max_user_reads","text":"

      Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. node_disk_max_user_reads is the maximum of disk_user_reads for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_write_blocks","title":"node_disk_max_user_write_blocks","text":"

      Number of blocks transferred for user write operations per second. node_disk_max_user_write_blocks is the maximum of disk_user_write_blocks for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_write_chain","title":"node_disk_max_user_write_chain","text":"

      Average number of blocks transferred in each user write operation. node_disk_max_user_write_chain is the maximum of disk_user_write_chain for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_chainUnit: noneType: averageBase: user_write_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_chainUnit: noneType: averageBase: user_writes conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_write_latency","title":"node_disk_max_user_write_latency","text":"

      Average latency per block in microseconds for user write operations. node_disk_max_user_write_latency is the maximum of disk_user_write_latency for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_max_user_writes","title":"node_disk_max_user_writes","text":"

      Number of disk write operations initiated each second for storing data or metadata associated with user requests. node_disk_max_user_writes is the maximum of disk_user_writes for label node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_writesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_total_data","title":"node_disk_total_data","text":"

      Total throughput for user operations per second. node_disk_total_data is disk_total_data aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_total_transfers","title":"node_disk_total_transfers","text":"

      Total number of disk operations involving data transfer initiated per second. node_disk_total_transfers is disk_total_transfers aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_transfer_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_transfersUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_read_blocks","title":"node_disk_user_read_blocks","text":"

      Number of blocks transferred for user read operations per second. node_disk_user_read_blocks is disk_user_read_blocks aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_read_chain","title":"node_disk_user_read_chain","text":"

      Average number of blocks transferred in each user read operation. node_disk_user_read_chain is disk_user_read_chain aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_chainUnit: noneType: averageBase: user_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_chainUnit: noneType: averageBase: user_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_read_latency","title":"node_disk_user_read_latency","text":"

      Average latency per block in microseconds for user read operations. node_disk_user_read_latency is disk_user_read_latency aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_reads","title":"node_disk_user_reads","text":"

      Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. node_disk_user_reads is disk_user_reads aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_write_blocks","title":"node_disk_user_write_blocks","text":"

      Number of blocks transferred for user write operations per second. node_disk_user_write_blocks is disk_user_write_blocks aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_write_chain","title":"node_disk_user_write_chain","text":"

      Average number of blocks transferred in each user write operation. node_disk_user_write_chain is disk_user_write_chain aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_chainUnit: noneType: averageBase: user_write_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_chainUnit: noneType: averageBase: user_writes conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_write_latency","title":"node_disk_user_write_latency","text":"

      Average latency per block in microseconds for user write operations. node_disk_user_write_latency is disk_user_write_latency aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_disk_user_writes","title":"node_disk_user_writes","text":"

      Number of disk write operations initiated each second for storing data or metadata associated with user requests. node_disk_user_writes is disk_user_writes aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_writesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#node_failed_fan","title":"node_failed_fan","text":"

Number of chassis fans that are not operating within the recommended RPM range.

      API Endpoint Metric Template REST api/cluster/nodes controller.failed_fan.count conf/rest/9.12.0/node.yaml ZAPI system-node-get-iter node-details-info.env-failed-fan-count conf/zapi/cdot/9.8.0/node.yaml"},{"location":"ontap-metrics/#node_failed_power","title":"node_failed_power","text":"
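node_failed_fan is collected from the configuration endpoint api/cluster/nodes (field controller.failed_fan.count) rather than from a performance counter table. The sketch below shows one way to query that field directly over the ONTAP REST API; the host, credentials, and the assumption that results come back under a "records" key are hypothetical and should be verified against your cluster, and this is not how Harvest itself issues the request.

```python
# Minimal sketch: fetch the field behind node_failed_fan from the REST
# endpoint listed above (api/cluster/nodes, controller.failed_fan.count).
# Host, credentials, and the "records" response shape are assumptions.
import requests

def failed_fans(host: str, auth: tuple[str, str]) -> dict[str, int]:
    resp = requests.get(
        f"https://{host}/api/cluster/nodes",
        params={"fields": "name,controller.failed_fan.count"},
        auth=auth,
        verify=False,  # lab-only; use proper CA verification in production
        timeout=30,
    )
    resp.raise_for_status()
    counts: dict[str, int] = {}
    for rec in resp.json().get("records", []):
        failed = rec.get("controller", {}).get("failed_fan", {}).get("count", 0)
        counts[rec["name"]] = failed
    return counts

if __name__ == "__main__":
    print(failed_fans("cluster.example.com", ("admin", "password")))
```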

      Number of failed power supply units.

      API Endpoint Metric Template REST api/cluster/nodes controller.failed_power_supply.count conf/rest/9.12.0/node.yaml ZAPI system-node-get-iter node-details-info.env-failed-power-supply-count conf/zapi/cdot/9.8.0/node.yaml"},{"location":"ontap-metrics/#node_fcp_data_recv","title":"node_fcp_data_recv","text":"

      Number of FCP kilobytes (KB) received per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node fcp_data_receivedUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node fcp_data_recvUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_fcp_data_sent","title":"node_fcp_data_sent","text":"

      Number of FCP kilobytes (KB) sent per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node fcp_data_sentUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node fcp_data_sentUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_fcp_ops","title":"node_fcp_ops","text":"

      Number of FCP operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node fcp_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node fcp_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_hdd_data_read","title":"node_hdd_data_read","text":"

Number of HDD kilobytes (KB) read per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node hdd_data_readUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node hdd_data_readUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_hdd_data_written","title":"node_hdd_data_written","text":"

      Number of HDD kilobytes (KB) written per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node hdd_data_writtenUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node hdd_data_writtenUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_iscsi_ops","title":"node_iscsi_ops","text":"

      Number of iSCSI operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node iscsi_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node iscsi_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_memory","title":"node_memory","text":"

      Total memory in megabytes (MB)

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node memoryUnit: noneType: rawBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node memoryUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_net_data_recv","title":"node_net_data_recv","text":"

      Number of network kilobytes (KB) received per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node network_data_receivedUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node net_data_recvUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_net_data_sent","title":"node_net_data_sent","text":"

      Number of network kilobytes (KB) sent per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node network_data_sentUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node net_data_sentUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_nfs_access_avg_latency","title":"node_nfs_access_avg_latency","text":"

      Average latency of Access procedure requests. The counter keeps track of the average response time of Access requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_access_total","title":"node_nfs_access_total","text":"

      Total number of Access procedure requests. It is the total number of access success and access error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_backchannel_ctl_avg_latency","title":"node_nfs_backchannel_ctl_avg_latency","text":"

      Average latency of BACKCHANNEL_CTL operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node backchannel_ctl.average_latencyUnit: microsecType: averageBase: backchannel_ctl.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node backchannel_ctl.average_latencyUnit: microsecType: averageBase: backchannel_ctl.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node backchannel_ctl_avg_latencyUnit: microsecType: average,no-zero-valuesBase: backchannel_ctl_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node backchannel_ctl_avg_latencyUnit: microsecType: average,no-zero-valuesBase: backchannel_ctl_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_backchannel_ctl_total","title":"node_nfs_backchannel_ctl_total","text":"

      Total number of BACKCHANNEL_CTL operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node backchannel_ctl.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node backchannel_ctl.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node backchannel_ctl_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node backchannel_ctl_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_bind_conn_to_session_avg_latency","title":"node_nfs_bind_conn_to_session_avg_latency","text":"

      Average latency of BIND_CONN_TO_SESSION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node bind_connections_to_session.average_latencyUnit: microsecType: averageBase: bind_connections_to_session.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node bind_conn_to_session.average_latencyUnit: microsecType: averageBase: bind_conn_to_session.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node bind_conn_to_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: bind_conn_to_session_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node bind_conn_to_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: bind_conn_to_session_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_bind_conn_to_session_total","title":"node_nfs_bind_conn_to_session_total","text":"

      Total number of BIND_CONN_TO_SESSION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node bind_connections_to_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node bind_conn_to_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node bind_conn_to_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node bind_conn_to_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_close_avg_latency","title":"node_nfs_close_avg_latency","text":"

      Average latency of CLOSE operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node close.average_latencyUnit: microsecType: averageBase: close.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node close.average_latencyUnit: microsecType: averageBase: close.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node close.average_latencyUnit: microsecType: averageBase: close.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node close_avg_latencyUnit: microsecType: average,no-zero-valuesBase: close_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node close_avg_latencyUnit: microsecType: average,no-zero-valuesBase: close_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node close_avg_latencyUnit: microsecType: average,no-zero-valuesBase: close_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_close_total","title":"node_nfs_close_total","text":"

      Total number of CLOSE operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node close.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node close.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node close.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node close_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node close_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node close_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_commit_avg_latency","title":"node_nfs_commit_avg_latency","text":"

      Average latency of Commit procedure requests. The counter keeps track of the average response time of Commit requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_commit_total","title":"node_nfs_commit_total","text":"

      Total number of Commit procedure requests. It is the total number of Commit success and Commit error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_create_avg_latency","title":"node_nfs_create_avg_latency","text":"

      Average latency of Create procedure requests. The counter keeps track of the average response time of Create requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_create_session_avg_latency","title":"node_nfs_create_session_avg_latency","text":"

      Average latency of CREATE_SESSION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node create_session.average_latencyUnit: microsecType: averageBase: create_session.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node create_session.average_latencyUnit: microsecType: averageBase: create_session.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node create_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_session_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node create_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_session_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_create_session_total","title":"node_nfs_create_session_total","text":"

      Total number of CREATE_SESSION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node create_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node create_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node create_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node create_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_create_total","title":"node_nfs_create_total","text":"

      Total number of Create procedure requests. It is the total number of Create success and Create error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_delegpurge_avg_latency","title":"node_nfs_delegpurge_avg_latency","text":"

      Average latency of DELEGPURGE operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node delegpurge.average_latencyUnit: microsecType: averageBase: delegpurge.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node delegpurge.average_latencyUnit: microsecType: averageBase: delegpurge.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node delegpurge.average_latencyUnit: microsecType: averageBase: delegpurge.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node delegpurge_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegpurge_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node delegpurge_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegpurge_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node delegpurge_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegpurge_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_delegpurge_total","title":"node_nfs_delegpurge_total","text":"

      Total number of DELEGPURGE operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node delegpurge.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node delegpurge.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node delegpurge.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node delegpurge_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node delegpurge_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node delegpurge_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_delegreturn_avg_latency","title":"node_nfs_delegreturn_avg_latency","text":"

      Average latency of DELEGRETURN operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node delegreturn.average_latencyUnit: microsecType: averageBase: delegreturn.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node delegreturn.average_latencyUnit: microsecType: averageBase: delegreturn.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node delegreturn.average_latencyUnit: microsecType: averageBase: delegreturn.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node delegreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegreturn_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node delegreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegreturn_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node delegreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegreturn_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_delegreturn_total","title":"node_nfs_delegreturn_total","text":"

      Total number of DELEGRETURN operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node delegreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node delegreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node delegreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node delegreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node delegreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node delegreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_destroy_clientid_avg_latency","title":"node_nfs_destroy_clientid_avg_latency","text":"

      Average latency of DESTROY_CLIENTID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node destroy_clientid.average_latencyUnit: microsecType: averageBase: destroy_clientid.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node destroy_clientid.average_latencyUnit: microsecType: averageBase: destroy_clientid.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node destroy_clientid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_clientid_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node destroy_clientid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_clientid_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_destroy_clientid_total","title":"node_nfs_destroy_clientid_total","text":"

      Total number of DESTROY_CLIENTID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node destroy_clientid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node destroy_clientid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node destroy_clientid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node destroy_clientid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_destroy_session_avg_latency","title":"node_nfs_destroy_session_avg_latency","text":"

      Average latency of DESTROY_SESSION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node destroy_session.average_latencyUnit: microsecType: averageBase: destroy_session.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node destroy_session.average_latencyUnit: microsecType: averageBase: destroy_session.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node destroy_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_session_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node destroy_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_session_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_destroy_session_total","title":"node_nfs_destroy_session_total","text":"

      Total number of DESTROY_SESSION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node destroy_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node destroy_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node destroy_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node destroy_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_exchange_id_avg_latency","title":"node_nfs_exchange_id_avg_latency","text":"

      Average latency of EXCHANGE_ID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node exchange_id.average_latencyUnit: microsecType: averageBase: exchange_id.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node exchange_id.average_latencyUnit: microsecType: averageBase: exchange_id.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node exchange_id_avg_latencyUnit: microsecType: average,no-zero-valuesBase: exchange_id_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node exchange_id_avg_latencyUnit: microsecType: average,no-zero-valuesBase: exchange_id_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_exchange_id_total","title":"node_nfs_exchange_id_total","text":"

      Total number of EXCHANGE_ID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node exchange_id.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node exchange_id.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node exchange_id_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node exchange_id_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_free_stateid_avg_latency","title":"node_nfs_free_stateid_avg_latency","text":"

      Average latency of FREE_STATEID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node free_stateid.average_latencyUnit: microsecType: averageBase: free_stateid.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node free_stateid.average_latencyUnit: microsecType: averageBase: free_stateid.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node free_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: free_stateid_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node free_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: free_stateid_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_free_stateid_total","title":"node_nfs_free_stateid_total","text":"

      Total number of FREE_STATEID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node free_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node free_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node free_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node free_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_fsinfo_avg_latency","title":"node_nfs_fsinfo_avg_latency","text":"

      Average latency of FSInfo procedure requests. The counter keeps track of the average response time of FSInfo requests.

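      The Unit, Type, and Base columns shown throughout these tables come from the counter table's schema, which the cluster can report about itself. A minimal sketch follows, reusing the same placeholder cluster and credentials as above and assuming the REST counter-table endpoint returns a counter_schemas array.

      ```python
      # Minimal sketch: list the schema (unit, type, base/denominator) of the
      # fsinfo.* counters in svm_nfs_v3:node. Cluster address and credentials are
      # placeholders; the response shape is assumed from the ONTAP REST counter API.
      import requests

      CLUSTER = "cluster.example.com"
      AUTH = ("monitor", "secret")

      url = f"https://{CLUSTER}/api/cluster/counter/tables/svm_nfs_v3%3Anode"
      resp = requests.get(url, params={"fields": "counter_schemas"}, auth=AUTH, verify=False)
      resp.raise_for_status()

      for schema in resp.json().get("counter_schemas", []):
          if schema.get("name", "").startswith("fsinfo."):
              print(schema["name"],
                    schema.get("unit"),                          # e.g. microsec or none
                    schema.get("type"),                          # e.g. average or rate
                    schema.get("denominator", {}).get("name"))   # the Base counter, if any
      ```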
      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node fsinfo.average_latencyUnit: microsecType: averageBase: fsinfo.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node fsinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: fsinfo_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_fsinfo_total","title":"node_nfs_fsinfo_total","text":"

      Total number of FSInfo procedure requests. It is the total number of FSInfo success and FSInfo error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node fsinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node fsinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_fsstat_avg_latency","title":"node_nfs_fsstat_avg_latency","text":"

      Average latency of FSStat procedure requests. The counter keeps track of the average response time of FSStat requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node fsstat.average_latencyUnit: microsecType: averageBase: fsstat.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node fsstat_avg_latencyUnit: microsecType: average,no-zero-valuesBase: fsstat_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_fsstat_total","title":"node_nfs_fsstat_total","text":"

      Total number of FSStat procedure requests. It is the total number of FSStat success and FSStat error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node fsstat.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node fsstat_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_get_dir_delegation_avg_latency","title":"node_nfs_get_dir_delegation_avg_latency","text":"

      Average latency of GET_DIR_DELEGATION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node get_dir_delegation.average_latencyUnit: microsecType: averageBase: get_dir_delegation.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node get_dir_delegation.average_latencyUnit: microsecType: averageBase: get_dir_delegation.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node get_dir_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: get_dir_delegation_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node get_dir_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: get_dir_delegation_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_get_dir_delegation_total","title":"node_nfs_get_dir_delegation_total","text":"

      Total number of GET_DIR_DELEGATION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node get_dir_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node get_dir_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node get_dir_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node get_dir_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_getattr_avg_latency","title":"node_nfs_getattr_avg_latency","text":"

      Average latency of GetAttr procedure requests. This counter keeps track of the average response time of GetAttr requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_getattr_total","title":"node_nfs_getattr_total","text":"

      Total number of Getattr procedure requests. It is the total number of getattr success and getattr error requests.

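      Counters of Type: rate, such as getattr.total below, are cumulative totals; the per-second value is derived by sampling twice and dividing the delta by the elapsed time. A small sketch with made-up numbers:

      ```python
      # Minimal sketch: turn a cumulative "Type: rate" counter into ops/sec.
      # The sample values below are illustrative, not taken from a real cluster.
      def ops_per_second(previous: int, current: int, elapsed_seconds: float) -> float:
          """Delta of a monotonically increasing total counter over the poll interval."""
          return (current - previous) / elapsed_seconds

      # 18,000 additional getattr calls observed over a 60-second poll -> 300.0 ops/sec
      print(ops_per_second(1_000_000, 1_018_000, 60.0))
      ```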
      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_getdeviceinfo_avg_latency","title":"node_nfs_getdeviceinfo_avg_latency","text":"

      Average latency of GETDEVICEINFO operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node getdeviceinfo.average_latencyUnit: microsecType: averageBase: getdeviceinfo.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getdeviceinfo.average_latencyUnit: microsecType: averageBase: getdeviceinfo.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getdeviceinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdeviceinfo_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getdeviceinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdeviceinfo_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_getdeviceinfo_total","title":"node_nfs_getdeviceinfo_total","text":"

      Total number of GETDEVICEINFO operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node getdeviceinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getdeviceinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getdeviceinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getdeviceinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_getdevicelist_avg_latency","title":"node_nfs_getdevicelist_avg_latency","text":"

      Average latency of GETDEVICELIST operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node getdevicelist.average_latencyUnit: microsecType: averageBase: getdevicelist.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getdevicelist.average_latencyUnit: microsecType: averageBase: getdevicelist.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getdevicelist_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdevicelist_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getdevicelist_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdevicelist_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_getdevicelist_total","title":"node_nfs_getdevicelist_total","text":"

      Total number of GETDEVICELIST operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node getdevicelist.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getdevicelist.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getdevicelist_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getdevicelist_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_getfh_avg_latency","title":"node_nfs_getfh_avg_latency","text":"

      Average latency of GETFH operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node getfh.average_latencyUnit: microsecType: averageBase: getfh.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getfh.average_latencyUnit: microsecType: averageBase: getfh.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node getfh.average_latencyUnit: microsecType: averageBase: getfh.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node getfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getfh_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_getfh_total","title":"node_nfs_getfh_total","text":"

      Total number of GETFH operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node getfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node getfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node getfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node getfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node getfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node getfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_latency","title":"node_nfs_latency","text":"

      Average latency of NFS requests. This counter keeps track of the average response time of NFS requests; the table below lists it per protocol version (NFSv3, NFSv4.0, NFSv4.1, and NFSv4.2).

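      Counters of Type: average are evaluated against their Base counter: both the cumulative latency (in microseconds) and total_ops are sampled twice, and the two deltas are divided. A small sketch with illustrative numbers:

      ```python
      # Minimal sketch: derive an average latency from a "Type: average" counter and
      # its "Base: total_ops" counter. All values below are illustrative.
      def average_latency_us(lat_prev, lat_curr, ops_prev, ops_curr):
          delta_ops = ops_curr - ops_prev
          if delta_ops == 0:
              return 0.0                      # no NFS ops in the interval
          return (lat_curr - lat_prev) / delta_ops

      # 9,000,000 extra microseconds spread over 18,000 ops -> 500.0 us per operation
      print(average_latency_us(50_000_000, 59_000_000, 100_000, 118_000))
      ```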
      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_layoutcommit_avg_latency","title":"node_nfs_layoutcommit_avg_latency","text":"

      Average latency of LAYOUTCOMMIT operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node layoutcommit.average_latencyUnit: microsecType: averageBase: layoutcommit.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node layoutcommit.average_latencyUnit: microsecType: averageBase: layoutcommit.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node layoutcommit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutcommit_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node layoutcommit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutcommit_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_layoutcommit_total","title":"node_nfs_layoutcommit_total","text":"

      Total number of LAYOUTCOMMIT operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node layoutcommit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node layoutcommit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node layoutcommit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node layoutcommit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_layoutget_avg_latency","title":"node_nfs_layoutget_avg_latency","text":"

      Average latency of LAYOUTGET operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node layoutget.average_latencyUnit: microsecType: averageBase: layoutget.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node layoutget.average_latencyUnit: microsecType: averageBase: layoutget.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node layoutget_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutget_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node layoutget_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutget_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_layoutget_total","title":"node_nfs_layoutget_total","text":"

      Total number of LAYOUTGET operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node layoutget.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node layoutget.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node layoutget_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node layoutget_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_layoutreturn_avg_latency","title":"node_nfs_layoutreturn_avg_latency","text":"

      Average latency of LAYOUTRETURN operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node layoutreturn.average_latencyUnit: microsecType: averageBase: layoutreturn.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node layoutreturn.average_latencyUnit: microsecType: averageBase: layoutreturn.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node layoutreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutreturn_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node layoutreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutreturn_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_layoutreturn_total","title":"node_nfs_layoutreturn_total","text":"

      Total number of LAYOUTRETURN operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node layoutreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node layoutreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node layoutreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node layoutreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_link_avg_latency","title":"node_nfs_link_avg_latency","text":"

      Average latency of Link procedure requests. The counter keeps track of the average response time of Link requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_link_total","title":"node_nfs_link_total","text":"

      Total number of Link procedure requests. It is the total number of Link success and Link error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lock_avg_latency","title":"node_nfs_lock_avg_latency","text":"

      Average latency of LOCK operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node lock.average_latencyUnit: microsecType: averageBase: lock.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lock.average_latencyUnit: microsecType: averageBase: lock.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lock.average_latencyUnit: microsecType: averageBase: lock.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lock_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lock_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lock_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lock_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lock_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lock_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lock_total","title":"node_nfs_lock_total","text":"

      Total number of LOCK operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node lock.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lock.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lock.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lock_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lock_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lock_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lockt_avg_latency","title":"node_nfs_lockt_avg_latency","text":"

      Average latency of LOCKT operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node lockt.average_latencyUnit: microsecType: averageBase: lockt.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lockt.average_latencyUnit: microsecType: averageBase: lockt.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lockt.average_latencyUnit: microsecType: averageBase: lockt.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lockt_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lockt_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lockt_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lockt_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lockt_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lockt_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lockt_total","title":"node_nfs_lockt_total","text":"

      Total number of LOCKT operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node lockt.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lockt.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lockt.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lockt_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lockt_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lockt_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_locku_avg_latency","title":"node_nfs_locku_avg_latency","text":"

      Average latency of LOCKU operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node locku.average_latencyUnit: microsecType: averageBase: locku.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node locku.average_latencyUnit: microsecType: averageBase: locku.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node locku.average_latencyUnit: microsecType: averageBase: locku.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node locku_avg_latencyUnit: microsecType: average,no-zero-valuesBase: locku_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node locku_avg_latencyUnit: microsecType: average,no-zero-valuesBase: locku_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node locku_avg_latencyUnit: microsecType: average,no-zero-valuesBase: locku_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_locku_total","title":"node_nfs_locku_total","text":"

      Total number of LOCKU operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node locku.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node locku.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node locku.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node locku_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node locku_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node locku_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lookup_avg_latency","title":"node_nfs_lookup_avg_latency","text":"

      Average latency of LookUp procedure requests. The counter keeps track of the average response time of LookUp requests.

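      Because lookup latency is reported separately for NFSv3, v4.0, v4.1, and v4.2 in the table below, a single node-level figure has to weight each version's average by its share of lookup operations in the interval. A sketch with placeholder per-version numbers:

      ```python
      # Minimal sketch: combine per-protocol-version lookup latencies into one
      # node-level average, weighted by the number of lookup ops in the interval.
      # The per-version samples are placeholders.
      def combined_avg_latency(per_version):
          """per_version: iterable of (avg_latency_us, ops_in_interval) pairs."""
          total_ops = sum(ops for _, ops in per_version)
          if total_ops == 0:
              return 0.0
          return sum(lat * ops for lat, ops in per_version) / total_ops

      samples = [(450.0, 12_000),   # nfsv3
                 (600.0, 3_000),    # nfsv4.0
                 (520.0, 1_500),    # nfsv4.1
                 (700.0, 500)]      # nfsv4.2
      print(combined_avg_latency(samples))   # -> 490.0 microseconds
      ```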
      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lookup_total","title":"node_nfs_lookup_total","text":"

      Total number of Lookup procedure requests. It is the total number of lookup success and lookup error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lookupp_avg_latency","title":"node_nfs_lookupp_avg_latency","text":"

      Average latency of LOOKUPP operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node lookupp.average_latencyUnit: microsecType: averageBase: lookupp.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lookupp.average_latencyUnit: microsecType: averageBase: lookupp.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lookupp.average_latencyUnit: microsecType: averageBase: lookupp.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lookupp_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookupp_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lookupp_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookupp_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lookupp_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookupp_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_lookupp_total","title":"node_nfs_lookupp_total","text":"

      Total number of LOOKUPP operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node lookupp.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node lookupp.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node lookupp.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node lookupp_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node lookupp_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node lookupp_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_mkdir_avg_latency","title":"node_nfs_mkdir_avg_latency","text":"

      Average latency of MkDir procedure requests. The counter keeps track of the average response time of MkDir requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node mkdir.average_latencyUnit: microsecType: averageBase: mkdir.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node mkdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: mkdir_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_mkdir_total","title":"node_nfs_mkdir_total","text":"

      Total number of MkDir procedure requests. It is the total number of MkDir success and MkDir error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node mkdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node mkdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_mknod_avg_latency","title":"node_nfs_mknod_avg_latency","text":"

      Average latency of MkNod procedure requests. The counter keeps track of the average response time of MkNod requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node mknod.average_latencyUnit: microsecType: averageBase: mknod.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node mknod_avg_latencyUnit: microsecType: average,no-zero-valuesBase: mknod_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_mknod_total","title":"node_nfs_mknod_total","text":"

      Total number of MkNod procedure requests. It is the total number of MkNod success and MkNod error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node mknod.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node mknod_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_null_avg_latency","title":"node_nfs_null_avg_latency","text":"

      Average latency of Null procedure requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_null_total","title":"node_nfs_null_total","text":"

      Total number of Null procedure requests. It is the total of null success and null error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_nverify_avg_latency","title":"node_nfs_nverify_avg_latency","text":"

      Average latency of NVERIFY operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node nverify.average_latencyUnit: microsecType: averageBase: nverify.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node nverify.average_latencyUnit: microsecType: averageBase: nverify.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node nverify.average_latencyUnit: microsecType: averageBase: nverify.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node nverify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: nverify_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node nverify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: nverify_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node nverify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: nverify_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_nverify_total","title":"node_nfs_nverify_total","text":"

      Total number of NVERIFY operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node nverify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node nverify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node nverify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node nverify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node nverify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node nverify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_open_avg_latency","title":"node_nfs_open_avg_latency","text":"

      Average latency of OPEN operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node open.average_latencyUnit: microsecType: averageBase: open.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node open.average_latencyUnit: microsecType: averageBase: open.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node open.average_latencyUnit: microsecType: averageBase: open.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node open_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node open_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node open_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_open_confirm_avg_latency","title":"node_nfs_open_confirm_avg_latency","text":"

      Average latency of OPEN_CONFIRM procedures.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node open_confirm.average_latencyUnit: microsecType: averageBase: open_confirm.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node open_confirm_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_confirm_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_open_confirm_total","title":"node_nfs_open_confirm_total","text":"

      Total number of OPEN_CONFIRM procedures.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node open_confirm.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node open_confirm_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_open_downgrade_avg_latency","title":"node_nfs_open_downgrade_avg_latency","text":"

      Average latency of OPEN_DOWNGRADE operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node open_downgrade.average_latencyUnit: microsecType: averageBase: open_downgrade.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node open_downgrade.average_latencyUnit: microsecType: averageBase: open_downgrade.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node open_downgrade.average_latencyUnit: microsecType: averageBase: open_downgrade.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node open_downgrade_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_downgrade_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node open_downgrade_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_downgrade_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node open_downgrade_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_downgrade_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_open_downgrade_total","title":"node_nfs_open_downgrade_total","text":"

      Total number of OPEN_DOWNGRADE operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node open_downgrade.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node open_downgrade.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node open_downgrade.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node open_downgrade_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node open_downgrade_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node open_downgrade_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_open_total","title":"node_nfs_open_total","text":"

      Total number of OPEN operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node open.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node open.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node open.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node open_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node open_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node open_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_openattr_avg_latency","title":"node_nfs_openattr_avg_latency","text":"

      Average latency of OPENATTR operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node openattr.average_latencyUnit: microsecType: averageBase: openattr.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node openattr.average_latencyUnit: microsecType: averageBase: openattr.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node openattr.average_latencyUnit: microsecType: averageBase: openattr.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node openattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: openattr_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node openattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: openattr_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node openattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: openattr_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_openattr_total","title":"node_nfs_openattr_total","text":"

      Total number of OPENATTR operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node openattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node openattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node openattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node openattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node openattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node openattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_ops","title":"node_nfs_ops","text":"

      Number of NFS operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node nfs_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node nfs_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_nfs_pathconf_avg_latency","title":"node_nfs_pathconf_avg_latency","text":"

      Average latency of PathConf procedure requests. The counter keeps track of the average response time of PathConf requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node pathconf.average_latencyUnit: microsecType: averageBase: pathconf.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node pathconf_avg_latencyUnit: microsecType: average,no-zero-valuesBase: pathconf_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_pathconf_total","title":"node_nfs_pathconf_total","text":"

      Total number of PathConf procedure requests. It is the total number of PathConf success and PathConf error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node pathconf.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node pathconf_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_putfh_avg_latency","title":"node_nfs_putfh_avg_latency","text":"

      Average latency of PUTFH operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node putfh.average_latencyUnit: noneType: deltaBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node putfh.average_latencyUnit: microsecType: averageBase: putfh.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node putfh.average_latencyUnit: microsecType: averageBase: putfh.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node putfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node putfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node putfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putfh_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_putfh_total","title":"node_nfs_putfh_total","text":"

      Total number of PUTFH operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node putfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node putfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node putfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node putfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node putfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node putfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_putpubfh_avg_latency","title":"node_nfs_putpubfh_avg_latency","text":"

      Average latency of PUTPUBFH operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node putpubfh.average_latencyUnit: microsecType: averageBase: putpubfh.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node putpubfh.average_latencyUnit: microsecType: averageBase: putpubfh.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node putpubfh.average_latencyUnit: microsecType: averageBase: putpubfh.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node putpubfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putpubfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node putpubfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putpubfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node putpubfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putpubfh_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_putpubfh_total","title":"node_nfs_putpubfh_total","text":"

      Total number of PUTPUBFH operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node putpubfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node putpubfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node putpubfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node putpubfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node putpubfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node putpubfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_putrootfh_avg_latency","title":"node_nfs_putrootfh_avg_latency","text":"

      Average latency of PUTROOTFH operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node putrootfh.average_latencyUnit: microsecType: averageBase: putrootfh.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node putrootfh.average_latencyUnit: microsecType: averageBase: putrootfh.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node putrootfh.average_latencyUnit: microsecType: averageBase: putrootfh.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node putrootfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putrootfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node putrootfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putrootfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node putrootfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putrootfh_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_putrootfh_total","title":"node_nfs_putrootfh_total","text":"

      Total number of PUTROOTFH operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node putrootfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node putrootfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node putrootfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node putrootfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node putrootfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node putrootfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_read_avg_latency","title":"node_nfs_read_avg_latency","text":"

      Average latency of Read procedure requests. The counter keeps track of the average response time of Read requests.
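As the table below shows, this counter is exported with Type: average and Base: read.total (read_total for ZAPI). In this counter model the raw value is a running latency sum, so a usable average over an interval is the delta of the latency counter divided by the delta of its base counter. A minimal sketch with invented sample values:

```python
# Minimal sketch of how an "average"-type counter with a Base counter becomes a
# latency value: divide the latency-counter delta by the base-counter delta.
# The sample numbers below are made up for illustration only.
def avg_latency_us(lat_prev, lat_curr, base_prev, base_curr):
    """Average latency in microseconds over the sample interval."""
    ops = base_curr - base_prev
    return (lat_curr - lat_prev) / ops if ops else 0.0

# Two hypothetical polls of read.average_latency and read.total, 60 s apart:
print(avg_latency_us(lat_prev=9_000_000, lat_curr=9_450_000,
                     base_prev=40_000, base_curr=41_000))   # -> 450.0 µs per read
```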

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_read_ops","title":"node_nfs_read_ops","text":"

      Total observed NFSv3 read operations per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node nfsv3_read_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_read_symlink_avg_latency","title":"node_nfs_read_symlink_avg_latency","text":"

      Average latency of ReadSymLink procedure requests. The counter keeps track of the average response time of ReadSymLink requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node read_symlink.average_latencyUnit: microsecType: averageBase: read_symlink.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node read_symlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_symlink_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_read_symlink_total","title":"node_nfs_read_symlink_total","text":"

      Total number of ReadSymLink procedure requests. It is the total number of read symlink success and read symlink error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node read_symlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node read_symlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_read_throughput","title":"node_nfs_read_throughput","text":"

      Rate of NFSv3 read data transfers per second.
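The underlying counter is a cumulative byte count with Type: rate, so the exported b_per_sec value is the byte delta between two polls divided by the elapsed time. A small sketch with made-up numbers, converting the result to MiB/s for readability:

```python
# Minimal sketch of a "rate"-type throughput counter: delta of the cumulative
# byte counter divided by the polling interval. Numbers are illustrative only.
def to_mib_per_sec(bytes_prev, bytes_curr, interval_s):
    rate_bps = (bytes_curr - bytes_prev) / interval_s      # bytes per second
    return rate_bps / (1024 * 1024)                        # MiB per second

print(to_mib_per_sec(bytes_prev=1_000_000_000,
                     bytes_curr=1_629_145_600,
                     interval_s=60))                       # -> 10.0 MiB/s
```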

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node total.read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node total.read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node total.read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node nfsv3_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node nfs41_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node nfs42_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node nfs4_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_read_total","title":"node_nfs_read_total","text":"

      Total number of Read procedure requests. It is the total number of read success and read error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_readdir_avg_latency","title":"node_nfs_readdir_avg_latency","text":"

      Average latency of ReadDir procedure requests. The counter keeps track of the average response time of ReadDir requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_readdir_total","title":"node_nfs_readdir_total","text":"

      Total number of ReadDir procedure requests. It is the total number of ReadDir success and ReadDir error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_readdirplus_avg_latency","title":"node_nfs_readdirplus_avg_latency","text":"

      Average latency of ReadDirPlus procedure requests. The counter keeps track of the average response time of ReadDirPlus requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node readdirplus.average_latencyUnit: microsecType: averageBase: readdirplus.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node readdirplus_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdirplus_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_readdirplus_total","title":"node_nfs_readdirplus_total","text":"

      Total number of ReadDirPlus procedure requests. It is the total number of ReadDirPlus success and ReadDirPlus error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node readdirplus.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node readdirplus_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_readlink_avg_latency","title":"node_nfs_readlink_avg_latency","text":"

      Average latency of READLINK operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node readlink.average_latencyUnit: microsecType: averageBase: readlink.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node readlink.average_latencyUnit: microsecType: averageBase: readlink.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node readlink.average_latencyUnit: microsecType: averageBase: readlink.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node readlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readlink_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node readlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readlink_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node readlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readlink_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_readlink_total","title":"node_nfs_readlink_total","text":"

      Total number of READLINK operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node readlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node readlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node readlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node readlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node readlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node readlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_reclaim_complete_avg_latency","title":"node_nfs_reclaim_complete_avg_latency","text":"

      Average latency of RECLAIM_COMPLETE operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node reclaim_complete.average_latencyUnit: microsecType: averageBase: reclaim_complete.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node reclaim_complete.average_latencyUnit: microsecType: averageBase: reclaim_complete.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node reclaim_complete_avg_latencyUnit: microsecType: average,no-zero-valuesBase: reclaim_complete_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node reclaim_complete_avg_latencyUnit: microsecType: average,no-zero-valuesBase: reclaim_complete_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_reclaim_complete_total","title":"node_nfs_reclaim_complete_total","text":"

      Total number of RECLAIM_COMPLETE operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node reclaim_complete.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node reclaim_complete.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node reclaim_complete_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node reclaim_complete_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_release_lock_owner_avg_latency","title":"node_nfs_release_lock_owner_avg_latency","text":"

      Average latency of RELEASE_LOCKOWNER procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node release_lock_owner.average_latencyUnit: microsecType: averageBase: release_lock_owner.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node release_lock_owner_avg_latencyUnit: microsecType: average,no-zero-valuesBase: release_lock_owner_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_release_lock_owner_total","title":"node_nfs_release_lock_owner_total","text":"

      Total number of RELEASE_LOCKOWNER procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node release_lock_owner.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node release_lock_owner_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_remove_avg_latency","title":"node_nfs_remove_avg_latency","text":"

      Average latency of Remove procedure requests. The counter keeps track of the average response time of Remove requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_remove_total","title":"node_nfs_remove_total","text":"

      Total number of Remove procedure requests. It is the total number of Remove success and Remove error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_rename_avg_latency","title":"node_nfs_rename_avg_latency","text":"

      Average latency of Rename procedure requests. The counter keeps track of the average response time of Rename requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_rename_total","title":"node_nfs_rename_total","text":"

      Total number of Rename procedure requests. It is the total number of Rename success and Rename error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_renew_avg_latency","title":"node_nfs_renew_avg_latency","text":"

      Average latency of RENEW procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node renew.average_latencyUnit: microsecType: averageBase: renew.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node renew_avg_latencyUnit: microsecType: average,no-zero-valuesBase: renew_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_renew_total","title":"node_nfs_renew_total","text":"

      Total number of RENEW procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node renew.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node renew_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_restorefh_avg_latency","title":"node_nfs_restorefh_avg_latency","text":"

      Average latency of RESTOREFH operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node restorefh.average_latencyUnit: microsecType: averageBase: restorefh.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node restorefh.average_latencyUnit: microsecType: averageBase: restorefh.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node restorefh.average_latencyUnit: microsecType: averageBase: restorefh.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node restorefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: restorefh_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node restorefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: restorefh_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node restorefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: restorefh_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_restorefh_total","title":"node_nfs_restorefh_total","text":"

      Total number of RESTOREFH operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node restorefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node restorefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node restorefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node restorefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node restorefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node restorefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_rmdir_avg_latency","title":"node_nfs_rmdir_avg_latency","text":"

      Average latency of RmDir procedure requests. The counter keeps track of the average response time of RmDir requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node rmdir.average_latencyUnit: microsecType: averageBase: rmdir.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node rmdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rmdir_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_rmdir_total","title":"node_nfs_rmdir_total","text":"

      Total number of RmDir procedure requests. It is the total number of RmDir success and RmDir error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node rmdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node rmdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_savefh_avg_latency","title":"node_nfs_savefh_avg_latency","text":"

      Average latency of SAVEFH operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node savefh.average_latencyUnit: microsecType: averageBase: savefh.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node savefh.average_latencyUnit: microsecType: averageBase: savefh.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node savefh.average_latencyUnit: microsecType: averageBase: savefh.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node savefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: savefh_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node savefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: savefh_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node savefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: savefh_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_savefh_total","title":"node_nfs_savefh_total","text":"

      Total number of SAVEFH operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node savefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node savefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node savefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node savefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node savefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node savefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_secinfo_avg_latency","title":"node_nfs_secinfo_avg_latency","text":"

      Average latency of SECINFO operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node secinfo.average_latencyUnit: microsecType: averageBase: secinfo.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node secinfo.average_latencyUnit: microsecType: averageBase: secinfo.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node secinfo.average_latencyUnit: microsecType: averageBase: secinfo.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node secinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node secinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node secinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_secinfo_no_name_avg_latency","title":"node_nfs_secinfo_no_name_avg_latency","text":"

      Average latency of SECINFO_NO_NAME operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node secinfo_no_name.average_latencyUnit: microsecType: averageBase: secinfo_no_name.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node secinfo_no_name.average_latencyUnit: microsecType: averageBase: secinfo_no_name.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node secinfo_no_name_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_no_name_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node secinfo_no_name_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_no_name_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_secinfo_no_name_total","title":"node_nfs_secinfo_no_name_total","text":"

      Total number of SECINFO_NO_NAME operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node secinfo_no_name.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node secinfo_no_name.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node secinfo_no_name_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node secinfo_no_name_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_secinfo_total","title":"node_nfs_secinfo_total","text":"

      Total number of SECINFO operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node secinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node secinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node secinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node secinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node secinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node secinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_sequence_avg_latency","title":"node_nfs_sequence_avg_latency","text":"

      Average latency of SEQUENCE operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node sequence.average_latencyUnit: microsecType: averageBase: sequence.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node sequence.average_latencyUnit: microsecType: averageBase: sequence.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node sequence_avg_latencyUnit: microsecType: average,no-zero-valuesBase: sequence_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node sequence_avg_latencyUnit: microsecType: average,no-zero-valuesBase: sequence_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_sequence_total","title":"node_nfs_sequence_total","text":"

      Total number of SEQUENCE operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node sequence.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node sequence.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node sequence_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node sequence_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_set_ssv_avg_latency","title":"node_nfs_set_ssv_avg_latency","text":"

      Average latency of SET_SSV operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node set_ssv.average_latencyUnit: microsecType: averageBase: set_ssv.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node set_ssv.average_latencyUnit: microsecType: averageBase: set_ssv.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node set_ssv_avg_latencyUnit: microsecType: average,no-zero-valuesBase: set_ssv_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node set_ssv_avg_latencyUnit: microsecType: average,no-zero-valuesBase: set_ssv_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_set_ssv_total","title":"node_nfs_set_ssv_total","text":"

      Total number of SET_SSV operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node set_ssv.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node set_ssv.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node set_ssv_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node set_ssv_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_setattr_avg_latency","title":"node_nfs_setattr_avg_latency","text":"

      Average latency of SetAttr procedure requests. The counter keeps track of the average response time of SetAttr requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_setattr_total","title":"node_nfs_setattr_total","text":"

      Total number of SetAttr procedure requests. It is the total number of SetAttr success and SetAttr error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_setclientid_avg_latency","title":"node_nfs_setclientid_avg_latency","text":"

      Average latency of SETCLIENTID procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node setclientid.average_latencyUnit: microsecType: averageBase: setclientid.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node setclientid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setclientid_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_setclientid_confirm_avg_latency","title":"node_nfs_setclientid_confirm_avg_latency","text":"

      Average latency of SETCLIENTID_CONFIRM procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node setclientid_confirm.average_latencyUnit: microsecType: averageBase: setclientid_confirm.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node setclientid_confirm_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setclientid_confirm_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_setclientid_confirm_total","title":"node_nfs_setclientid_confirm_total","text":"

      Total number of SETCLIENTID_CONFIRM procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node setclientid_confirm.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node setclientid_confirm_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_setclientid_total","title":"node_nfs_setclientid_total","text":"

      Total number of SETCLIENTID procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4:node setclientid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4:node setclientid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_symlink_avg_latency","title":"node_nfs_symlink_avg_latency","text":"

      Average latency of SymLink procedure requests. The counter keeps track of the average response time of SymLink requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node symlink.average_latencyUnit: microsecType: averageBase: symlink.total conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node symlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: symlink_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_symlink_total","title":"node_nfs_symlink_total","text":"

      Total number of SymLink procedure requests. It is the total number of SymLink success and SymLink error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node symlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node symlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_test_stateid_avg_latency","title":"node_nfs_test_stateid_avg_latency","text":"

      Average latency of TEST_STATEID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node test_stateid.average_latencyUnit: microsecType: averageBase: test_stateid.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node test_stateid.average_latencyUnit: microsecType: averageBase: test_stateid.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node test_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: test_stateid_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node test_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: test_stateid_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_test_stateid_total","title":"node_nfs_test_stateid_total","text":"

      Total number of TEST_STATEID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node test_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node test_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node test_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node test_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_throughput","title":"node_nfs_throughput","text":"

      Rate of NFSv3 data transfers per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node total.throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node total.throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node total.throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node nfsv3_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node nfs41_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node nfs42_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node nfs4_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_total_ops","title":"node_nfs_total_ops","text":"

      Total number of NFSv3 procedure requests per second.
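As the table below shows, this one metric is populated from four separate counter tables (NFSv3, NFSv4, NFSv4.1, NFSv4.2), so each protocol version contributes its own time series and a per-node grand total is a sum across them. A minimal sketch with hypothetical label names and invented values:

```python
# Minimal sketch: sum per-protocol-version ops/s samples into a per-node total.
# The (node, version) labels and the values are invented for illustration; actual
# label names depend on how the exporter tags each source counter table.
samples = {
    ("node-01", "nfsv3"):   1800.0,   # ops/s from svm_nfs_v3:node
    ("node-01", "nfsv4"):    120.0,   # ops/s from svm_nfs_v4:node
    ("node-01", "nfsv4.1"):  640.0,   # ops/s from svm_nfs_v41:node
    ("node-01", "nfsv4.2"):   90.0,   # ops/s from svm_nfs_v42:node
}

total_by_node = {}
for (node, _version), ops in samples.items():
    total_by_node[node] = total_by_node.get(node, 0.0) + ops

print(total_by_node)   # {'node-01': 2650.0}
```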

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node nfsv3_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node total_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node total_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node total_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_verify_avg_latency","title":"node_nfs_verify_avg_latency","text":"

      Average latency of VERIFY operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node verify.average_latencyUnit: microsecType: averageBase: verify.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node verify.average_latencyUnit: microsecType: averageBase: verify.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node verify.average_latencyUnit: microsecType: averageBase: verify.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node verify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: verify_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node verify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: verify_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node verify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: verify_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_verify_total","title":"node_nfs_verify_total","text":"

      Total number of VERIFY operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node verify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node verify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node verify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv4_1:node verify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node verify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node verify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_want_delegation_avg_latency","title":"node_nfs_want_delegation_avg_latency","text":"

      Average latency of WANT_DELEGATION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node want_delegation.average_latencyUnit: microsecType: averageBase: want_delegation.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node want_delegation.average_latencyUnit: microsecType: averageBase: want_delegation.total conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node want_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: want_delegation_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node want_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: want_delegation_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_want_delegation_total","title":"node_nfs_want_delegation_total","text":"

      Total number of WANT_DELEGATION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41:node want_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node want_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4_1:node want_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node want_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml"},{"location":"ontap-metrics/#node_nfs_write_avg_latency","title":"node_nfs_write_avg_latency","text":"

      Average latency of Write procedure requests. The counter keeps track of the average response time of Write requests.
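
      The Type: average rows in the template below are not gauges: the raw counter is a cumulative microsecond total and the listed Base (write.total here) is the matching operation count, so the exported latency is the delta of one divided by the delta of the other across a poll interval. A minimal Python sketch of that usual derivation; the sample dictionaries and their values are purely illustrative, not Harvest code:

      def derive_average(curr, prev, counter, base):
          # Average-type counters: delta of the latency accumulator divided by
          # the delta of its Base ops counter over the same poll interval.
          d_counter = curr[counter] - prev[counter]
          d_base = curr[base] - prev[base]
          return d_counter / d_base if d_base > 0 else 0.0

      # Illustrative raw samples of write.average_latency / write.total (values made up).
      prev = {"write.average_latency": 1_000_000, "write.total": 4_000}
      curr = {"write.average_latency": 1_090_000, "write.total": 4_600}

      print(derive_average(curr, prev, "write.average_latency", "write.total"))  # 150.0 microseconds per write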

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_write_ops","title":"node_nfs_write_ops","text":"

      Total observed NFSv3 write operations per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv3:node nfsv3_write_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml"},{"location":"ontap-metrics/#node_nfs_write_throughput","title":"node_nfs_write_throughput","text":"

      Rate of NFSv3 write data transfers per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node total.write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node total.write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node total.write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node nfsv3_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node nfs41_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node nfs42_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node nfs4_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nfs_write_total","title":"node_nfs_write_total","text":"

      Total number of Write procedure requests. It is the total number of write success and write error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3:node write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3_node.yaml REST api/cluster/counter/tables/svm_nfs_v41:node write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1_node.yaml REST api/cluster/counter/tables/svm_nfs_v42:node write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2_node.yaml REST api/cluster/counter/tables/svm_nfs_v4:node write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_node.yaml ZAPI perf-object-get-instances nfsv3:node write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3_node.yaml ZAPI perf-object-get-instances nfsv4_1:node write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1_node.yaml ZAPI perf-object-get-instances nfsv4_2:node write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2_node.yaml ZAPI perf-object-get-instances nfsv4:node write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_node.yaml"},{"location":"ontap-metrics/#node_nvme_fc_data_recv","title":"node_nvme_fc_data_recv","text":"

      NVMe/FC kilobytes (KB) received per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node nvme_fc_data_receivedUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml"},{"location":"ontap-metrics/#node_nvme_fc_data_sent","title":"node_nvme_fc_data_sent","text":"

      NVMe/FC kilobytes (KB) sent per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node nvme_fc_data_sentUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml"},{"location":"ontap-metrics/#node_nvme_fc_ops","title":"node_nvme_fc_ops","text":"

      NVMe/FC operations per second
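
      The three NVMe/FC node metrics above are all served by the same system:node counter table, so one REST call can fetch them together. A minimal sketch using Python's requests, assuming ONTAP 9.11.1 or later (where /api/cluster/counter/tables is available); the host, credentials, and the counters.name filter shown are assumptions for illustration, not Harvest's exact request:

      import requests

      CLUSTER = "https://cluster.example.com"  # placeholder management LIF
      AUTH = ("admin", "password")             # placeholder credentials

      resp = requests.get(
          f"{CLUSTER}/api/cluster/counter/tables/system:node/rows",
          params={
              "fields": "counters",
              # Assumed filter: restrict the returned counters to the NVMe/FC ones.
              "counters.name": "nvme_fc_data_received|nvme_fc_data_sent|nvme_fc_ops",
          },
          auth=AUTH,
          verify=False,  # lab-only convenience for self-signed certificates
      )
      resp.raise_for_status()

      # Response shape assumed to follow the usual records/counters layout of counter tables.
      for row in resp.json().get("records", []):
          counters = {c["name"]: c.get("value") for c in row.get("counters", [])}
          print(row.get("id"), counters)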

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node nvme_fc_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml"},{"location":"ontap-metrics/#node_nvmf_data_recv","title":"node_nvmf_data_recv","text":"

      NVMe/FC kilobytes (KB) received per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node nvme_fc_data_received, 1Unit: Type: Base: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node nvmf_data_recvUnit: Type: Base: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_nvmf_data_sent","title":"node_nvmf_data_sent","text":"

      NVMe/FC kilobytes (KB) sent per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node nvme_fc_data_sent, 1Unit: Type: Base: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node nvmf_data_sentUnit: Type: Base: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_nvmf_ops","title":"node_nvmf_ops","text":"

      NVMe/FC operations per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node nvme_fc_ops, 1Unit: Type: Base: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node nvmf_opsUnit: Type: Base: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_ssd_data_read","title":"node_ssd_data_read","text":"

      Number of SSD Disk kilobytes (KB) read per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node ssd_data_readUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node ssd_data_readUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_ssd_data_written","title":"node_ssd_data_written","text":"

      Number of SSD Disk kilobytes (KB) written per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node ssd_data_writtenUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node ssd_data_writtenUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_total_data","title":"node_total_data","text":"

      Total throughput in bytes

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_total_latency","title":"node_total_latency","text":"

      Average latency for all operations in the system in microseconds

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node total_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node total_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_total_ops","title":"node_total_ops","text":"

      Total number of operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/system:node total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/system_node.yaml ZAPI perf-object-get-instances system:node total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/system_node.yaml"},{"location":"ontap-metrics/#node_uptime","title":"node_uptime","text":"

      The total time, in seconds, that the node has been up.
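
      The REST template for this metric reads uptime straight from /api/cluster/nodes rather than from a performance counter table, so it can be spot-checked with a plain GET. A minimal sketch with Python's requests; the host and credentials are placeholders:

      import requests

      CLUSTER = "https://cluster.example.com"  # placeholder management LIF
      AUTH = ("admin", "password")             # placeholder credentials

      resp = requests.get(
          f"{CLUSTER}/api/cluster/nodes",
          params={"fields": "name,uptime"},
          auth=AUTH,
          verify=False,  # lab-only convenience for self-signed certificates
      )
      resp.raise_for_status()

      for node in resp.json().get("records", []):
          # uptime is reported in seconds, matching node_uptime.
          print(f"{node['name']}: up {node['uptime']} seconds")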

      API Endpoint Metric Template REST api/cluster/nodes uptime conf/rest/9.12.0/node.yaml ZAPI system-node-get-iter node-details-info.node-uptime conf/zapi/cdot/9.8.0/node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_other_latency","title":"node_vol_cifs_other_latency","text":"

      Average time for the WAFL filesystem to process other CIFS operations to the volume; not including CIFS protocol request processing or network communication time which will also be included in client observed CIFS request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.other_latencyUnit: microsecType: averageBase: cifs.other_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_other_latencyUnit: microsecType: averageBase: cifs_other_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_other_ops","title":"node_vol_cifs_other_ops","text":"

      Number of other CIFS operations per second to the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_read_data","title":"node_vol_cifs_read_data","text":"

      Bytes read per second via CIFS

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_read_latency","title":"node_vol_cifs_read_latency","text":"

      Average time for the WAFL filesystem to process CIFS read requests to the volume; not including CIFS protocol request processing or network communication time which will also be included in client observed CIFS request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.read_latencyUnit: microsecType: averageBase: cifs.read_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_read_latencyUnit: microsecType: averageBase: cifs_read_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_read_ops","title":"node_vol_cifs_read_ops","text":"

      Number of CIFS read operations per second from the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_write_data","title":"node_vol_cifs_write_data","text":"

      Bytes written per second via CIFS

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_write_latency","title":"node_vol_cifs_write_latency","text":"

      Average time for the WAFL filesystem to process CIFS write requests to the volume; not including CIFS protocol request processing or network communication time which will also be included in client observed CIFS request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.write_latencyUnit: microsecType: averageBase: cifs.write_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_write_latencyUnit: microsecType: averageBase: cifs_write_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_cifs_write_ops","title":"node_vol_cifs_write_ops","text":"

      Number of CIFS write operations per second to the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node cifs.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node cifs_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_other_latency","title":"node_vol_fcp_other_latency","text":"

      Average time for the WAFL filesystem to process other FCP protocol operations to the volume; not including FCP protocol request processing or network communication time which will also be included in client observed FCP protocol request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.other_latencyUnit: microsecType: averageBase: fcp.other_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_other_latencyUnit: microsecType: averageBase: fcp_other_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_other_ops","title":"node_vol_fcp_other_ops","text":"

      Number of other block protocol operations per second to the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_read_data","title":"node_vol_fcp_read_data","text":"

      Bytes read per second via block protocol

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_read_latency","title":"node_vol_fcp_read_latency","text":"

      Average time for the WAFL filesystem to process FCP protocol read operations to the volume; not including FCP protocol request processing or network communication time which will also be included in client observed FCP protocol request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.read_latencyUnit: microsecType: averageBase: fcp.read_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_read_latencyUnit: microsecType: averageBase: fcp_read_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_read_ops","title":"node_vol_fcp_read_ops","text":"

      Number of block protocol read operations per second from the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_write_data","title":"node_vol_fcp_write_data","text":"

      Bytes written per second via block protocol

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_write_latency","title":"node_vol_fcp_write_latency","text":"

      Average time for the WAFL filesystem to process FCP protocol write operations to the volume; not including FCP protocol request processing or network communication time which will also be included in client observed FCP protocol request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.write_latencyUnit: microsecType: averageBase: fcp.write_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_write_latencyUnit: microsecType: averageBase: fcp_write_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_fcp_write_ops","title":"node_vol_fcp_write_ops","text":"

      Number of block protocol write operations per second to the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node fcp.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node fcp_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_other_latency","title":"node_vol_iscsi_other_latency","text":"

      Average time for the WAFL filesystem to process other iSCSI protocol operations to the volume; not including iSCSI protocol request processing or network communication time which will also be included in client observed iSCSI protocol request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.other_latencyUnit: microsecType: averageBase: iscsi.other_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_other_latencyUnit: microsecType: averageBase: iscsi_other_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_other_ops","title":"node_vol_iscsi_other_ops","text":"

      Number of other block protocol operations per second to the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_read_data","title":"node_vol_iscsi_read_data","text":"

      Bytes read per second via block protocol

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_read_latency","title":"node_vol_iscsi_read_latency","text":"

      Average time for the WAFL filesystem to process iSCSI protocol read operations to the volume; not including iSCSI protocol request processing or network communication time which will also be included in client observed iSCSI protocol request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.read_latencyUnit: microsecType: averageBase: iscsi.read_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_read_latencyUnit: microsecType: averageBase: iscsi_read_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_read_ops","title":"node_vol_iscsi_read_ops","text":"

      Number of block protocol read operations per second from the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_write_data","title":"node_vol_iscsi_write_data","text":"

      Bytes written per second via block protocol

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_write_latency","title":"node_vol_iscsi_write_latency","text":"

      Average time for the WAFL filesystem to process iSCSI protocol write operations to the volume; not including iSCSI protocol request processing or network communication time which will also be included in client observed iSCSI request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.write_latencyUnit: microsecType: averageBase: iscsi.write_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_write_latencyUnit: microsecType: averageBase: iscsi_write_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_iscsi_write_ops","title":"node_vol_iscsi_write_ops","text":"

      Number of block protocol write operations per second to the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node iscsi.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node iscsi_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_other_latency","title":"node_vol_nfs_other_latency","text":"

      Average time for the WAFL filesystem to process other NFS operations to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.other_latencyUnit: microsecType: averageBase: nfs.other_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_other_latencyUnit: microsecType: averageBase: nfs_other_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_other_ops","title":"node_vol_nfs_other_ops","text":"

      Number of other NFS operations per second to the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_read_data","title":"node_vol_nfs_read_data","text":"

      Bytes read per second via NFS

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_read_latency","title":"node_vol_nfs_read_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol read requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.read_latencyUnit: microsecType: averageBase: nfs.read_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_read_latencyUnit: microsecType: averageBase: nfs_read_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_read_ops","title":"node_vol_nfs_read_ops","text":"

      Number of NFS read operations per second from the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_write_data","title":"node_vol_nfs_write_data","text":"

      Bytes written per second via NFS

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_write_latency","title":"node_vol_nfs_write_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol write requests to the volume; not including NFS protocol request processing or network communication time, which will also be included in client observed NFS request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.write_latencyUnit: microsecType: averageBase: nfs.write_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_write_latencyUnit: microsecType: averageBase: nfs_write_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_nfs_write_ops","title":"node_vol_nfs_write_ops","text":"

      Number of NFS write operations per second to the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node nfs.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node nfs_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_read_latency","title":"node_vol_read_latency","text":"

      Average latency in microseconds for the WAFL filesystem to process read requests to the volume; not including request processing or network communication time

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node read_latencyUnit: microsecType: averageBase: total_read_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_vol_write_latency","title":"node_vol_write_latency","text":"

      Average latency in microseconds for the WAFL filesystem to process write requests to the volume; not including request processing or network communication time

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:node write_latencyUnit: microsecType: averageBase: total_write_ops conf/restperf/9.12.0/volume_node.yaml ZAPI perf-object-get-instances volume:node write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/volume_node.yaml"},{"location":"ontap-metrics/#node_volume_avg_latency","title":"node_volume_avg_latency","text":"

      Average latency in microseconds for the WAFL filesystem to process all the operations on the volume; not including request processing or network communication time. node_volume_avg_latency is volume_avg_latency aggregated by node.
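
      Because volume_avg_latency is itself a ratio, rolling it up by node is not a plain sum; the sketch below assumes the conventional ops-weighted mean (each volume's latency weighted by its total_ops), which is how such per-node figures are usually derived. The exact Harvest aggregation rule is not spelled out in this table, so treat this as illustrative Python only:

      from collections import defaultdict

      def aggregate_latency_by_node(volumes):
          """Ops-weighted mean of per-volume latency, grouped by node (assumed rule).

          Each record carries the volume's node label, its average latency in
          microseconds, and its total_ops rate; an unweighted mean would let
          idle volumes skew the node figure, so total_ops is used as the weight.
          """
          weighted = defaultdict(float)
          ops = defaultdict(float)
          for v in volumes:
              weighted[v["node"]] += v["avg_latency"] * v["total_ops"]
              ops[v["node"]] += v["total_ops"]
          return {n: (weighted[n] / ops[n] if ops[n] else 0.0) for n in ops}

      # Illustrative per-volume samples (values made up).
      samples = [
          {"node": "node1", "avg_latency": 200.0, "total_ops": 900},
          {"node": "node1", "avg_latency": 800.0, "total_ops": 100},
          {"node": "node2", "avg_latency": 150.0, "total_ops": 500},
      ]
      print(aggregate_latency_by_node(samples))  # {'node1': 260.0, 'node2': 150.0}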

      API Endpoint Metric Template REST api/cluster/counter/tables/volume average_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume avg_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_access_latency","title":"node_volume_nfs_access_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol access requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_access_latency is volume_nfs_access_latency aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.access_latencyUnit: microsecType: averageBase: nfs.access_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_access_latencyUnit: microsecType: averageBase: nfs_access_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_access_ops","title":"node_volume_nfs_access_ops","text":"

      Number of NFS accesses per second to the volume. node_volume_nfs_access_ops is volume_nfs_access_ops aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.access_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_access_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_getattr_latency","title":"node_volume_nfs_getattr_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol getattr requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_getattr_latency is volume_nfs_getattr_latency aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.getattr_latencyUnit: microsecType: averageBase: nfs.getattr_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_getattr_latencyUnit: microsecType: averageBase: nfs_getattr_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_getattr_ops","title":"node_volume_nfs_getattr_ops","text":"

      Number of NFS getattr per second to the volume. node_volume_nfs_getattr_ops is volume_nfs_getattr_ops aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.getattr_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_getattr_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_lookup_latency","title":"node_volume_nfs_lookup_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol lookup requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_lookup_latency is volume_nfs_lookup_latency aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.lookup_latencyUnit: microsecType: averageBase: nfs.lookup_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_lookup_latencyUnit: microsecType: averageBase: nfs_lookup_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_lookup_ops","title":"node_volume_nfs_lookup_ops","text":"

      Number of NFS lookups per second to the volume. node_volume_nfs_lookup_ops is volume_nfs_lookup_ops aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.lookup_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_lookup_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_other_latency","title":"node_volume_nfs_other_latency","text":"

      Average time for the WAFL filesystem to process other NFS operations to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_other_latency is volume_nfs_other_latency aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.other_latencyUnit: microsecType: averageBase: nfs.other_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_other_latencyUnit: microsecType: averageBase: nfs_other_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_other_ops","title":"node_volume_nfs_other_ops","text":"

      Number of other NFS operations per second to the volume. node_volume_nfs_other_ops is volume_nfs_other_ops aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_punch_hole_latency","title":"node_volume_nfs_punch_hole_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol hole-punch requests to the volume. node_volume_nfs_punch_hole_latency is volume_nfs_punch_hole_latency aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.punch_hole_latencyUnit: microsecType: averageBase: nfs.punch_hole_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_punch_hole_latencyUnit: microsecType: averageBase: nfs_punch_hole_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_punch_hole_ops","title":"node_volume_nfs_punch_hole_ops","text":"

      Number of NFS hole-punch requests per second to the volume. node_volume_nfs_punch_hole_ops is volume_nfs_punch_hole_ops aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.punch_hole_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_punch_hole_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_read_latency","title":"node_volume_nfs_read_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol read requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency. node_volume_nfs_read_latency is volume_nfs_read_latency aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.read_latencyUnit: microsecType: averageBase: nfs.read_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_read_latencyUnit: microsecType: averageBase: nfs_read_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_read_ops","title":"node_volume_nfs_read_ops","text":"

      Number of NFS read operations per second from the volume. node_volume_nfs_read_ops is volume_nfs_read_ops aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_setattr_latency","title":"node_volume_nfs_setattr_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol setattr requests to the volume. node_volume_nfs_setattr_latency is volume_nfs_setattr_latency aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.setattr_latencyUnit: microsecType: averageBase: nfs.setattr_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_setattr_latencyUnit: microsecType: averageBase: nfs_setattr_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_setattr_ops","title":"node_volume_nfs_setattr_ops","text":"

      Number of NFS setattr requests per second to the volume. node_volume_nfs_setattr_ops is volume_nfs_setattr_ops aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.setattr_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_setattr_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_total_ops","title":"node_volume_nfs_total_ops","text":"

      Number of total NFS operations per second to the volume. node_volume_nfs_total_ops is volume_nfs_total_ops aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_write_latency","title":"node_volume_nfs_write_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol write requests to the volume; not including NFS protocol request processing or network communication time, which will also be included in client observed NFS request latency. node_volume_nfs_write_latency is volume_nfs_write_latency aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.write_latencyUnit: microsecType: averageBase: nfs.write_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_write_latencyUnit: microsecType: averageBase: nfs_write_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_nfs_write_ops","title":"node_volume_nfs_write_ops","text":"

      Number of NFS write operations per second to the volume. node_volume_nfs_write_ops is volume_nfs_write_ops aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_other_latency","title":"node_volume_other_latency","text":"

      Average latency in microseconds for the WAFL filesystem to process other operations to the volume; not including request processing or network communication time. node_volume_other_latency is volume_other_latency aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume other_latencyUnit: microsecType: averageBase: total_other_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_other_ops","title":"node_volume_other_ops","text":"

      Number of other operations per second to the volume. node_volume_other_ops is volume_other_ops aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume total_other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_read_data","title":"node_volume_read_data","text":"

      Bytes read per second. node_volume_read_data is volume_read_data aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume bytes_readUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_read_latency","title":"node_volume_read_latency","text":"

      Average latency in microseconds for the WAFL filesystem to process read requests to the volume; not including request processing or network communication time. node_volume_read_latency is volume_read_latency aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume read_latencyUnit: microsecType: averageBase: total_read_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_read_ops","title":"node_volume_read_ops","text":"

      Number of read operations per second from the volume. node_volume_read_ops is volume_read_ops aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume total_read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_total_ops","title":"node_volume_total_ops","text":"

      Number of operations per second serviced by the volume. node_volume_total_ops is volume_total_ops aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_write_data","title":"node_volume_write_data","text":"

      Bytes written per second. node_volume_write_data is volume_write_data aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume bytes_writtenUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_write_latency","title":"node_volume_write_latency","text":"

      Average latency in microseconds for the WAFL filesystem to process write requests to the volume; not including request processing or network communication time. node_volume_write_latency is volume_write_latency aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume write_latencyUnit: microsecType: averageBase: total_write_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#node_volume_write_ops","title":"node_volume_write_ops","text":"

      Number of write operations per second to the volume. node_volume_write_ops is volume_write_ops aggregated by node.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume total_write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#nvme_lif_avg_latency","title":"nvme_lif_avg_latency","text":"

      Average latency for NVMF operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif average_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif avg_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_avg_other_latency","title":"nvme_lif_avg_other_latency","text":"

      Average latency for operations other than read, write, compare or compare-and-write.

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif average_other_latencyUnit: microsecType: averageBase: other_ops conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif avg_other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_avg_read_latency","title":"nvme_lif_avg_read_latency","text":"

      Average latency for read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif average_read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif avg_read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_avg_write_latency","title":"nvme_lif_avg_write_latency","text":"

      Average latency for write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif average_write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif avg_write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_other_ops","title":"nvme_lif_other_ops","text":"

      Number of operations that are not read, write, compare or compare-and-write.

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_read_data","title":"nvme_lif_read_data","text":"

      Amount of data read from the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_read_ops","title":"nvme_lif_read_ops","text":"

      Number of read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_total_ops","title":"nvme_lif_total_ops","text":"

      Total number of operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_write_data","title":"nvme_lif_write_data","text":"

      Amount of data written to the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvme_lif_write_ops","title":"nvme_lif_write_ops","text":"

      Number of write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_lif write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nvmf_lif.yaml ZAPI perf-object-get-instances nvmf_fc_lif write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.10.1/nvmf_lif.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_avg_latency","title":"nvmf_rdma_port_avg_latency","text":"

      Average latency for NVMF operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port average_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port avg_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_avg_other_latency","title":"nvmf_rdma_port_avg_other_latency","text":"

      Average latency for operations other than read, write, compare or compare-and-write

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port average_other_latencyUnit: microsecType: averageBase: other_ops conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port avg_other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_avg_read_latency","title":"nvmf_rdma_port_avg_read_latency","text":"

      Average latency for read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port average_read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port avg_read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_avg_write_latency","title":"nvmf_rdma_port_avg_write_latency","text":"

      Average latency for write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port average_write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port avg_write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_other_ops","title":"nvmf_rdma_port_other_ops","text":"

      Number of operations that are not read, write, compare or compare-and-write.

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port other_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_read_data","title":"nvmf_rdma_port_read_data","text":"

      Amount of data read from the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port read_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_read_ops","title":"nvmf_rdma_port_read_ops","text":"

      Number of read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port read_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_total_data","title":"nvmf_rdma_port_total_data","text":"

      Amount of NVMF traffic to and from the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port total_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_total_ops","title":"nvmf_rdma_port_total_ops","text":"

      Total number of operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port total_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_write_data","title":"nvmf_rdma_port_write_data","text":"

      Amount of data written to the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port write_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_rdma_port_write_ops","title":"nvmf_rdma_port_write_ops","text":"

      Number of write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_rdma_port write_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_rdma_port.yaml ZAPI perf-object-get-instances nvmf_rdma_port write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_rdma_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_avg_latency","title":"nvmf_tcp_port_avg_latency","text":"

      Average latency for NVMF operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port average_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port avg_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_avg_other_latency","title":"nvmf_tcp_port_avg_other_latency","text":"

      Average latency for operations other than read, write, compare or compare-and-write

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port average_other_latencyUnit: microsecType: averageBase: other_ops conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port avg_other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_avg_read_latency","title":"nvmf_tcp_port_avg_read_latency","text":"

      Average latency for read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port average_read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port avg_read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_avg_write_latency","title":"nvmf_tcp_port_avg_write_latency","text":"

      Average latency for write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port average_write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port avg_write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_other_ops","title":"nvmf_tcp_port_other_ops","text":"

      Number of operations that are not read, write, compare or compare-and-write.

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port other_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_read_data","title":"nvmf_tcp_port_read_data","text":"

      Amount of data read from the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port read_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_read_ops","title":"nvmf_tcp_port_read_ops","text":"

      Number of read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port read_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_total_data","title":"nvmf_tcp_port_total_data","text":"

      Amount of NVMF traffic to and from the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port total_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_total_ops","title":"nvmf_tcp_port_total_ops","text":"

      Total number of operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port total_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_write_data","title":"nvmf_tcp_port_write_data","text":"

      Amount of data written to the storage system

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port write_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#nvmf_tcp_port_write_ops","title":"nvmf_tcp_port_write_ops","text":"

      Number of write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/nvmf_tcp_port write_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/nvmf_tcp_port.yaml ZAPI perf-object-get-instances nvmf_tcp_port write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/nvmf_tcp_port.yaml"},{"location":"ontap-metrics/#ontaps3_logical_used_size","title":"ontaps3_logical_used_size","text":"

      Specifies the bucket logical used size up to this point. This field cannot be specified using a POST or PATCH method.

      API Endpoint Metric Template REST api/protocols/s3/buckets logical_used_size conf/rest/9.7.0/ontap_s3.yaml"},{"location":"ontap-metrics/#ontaps3_object_count","title":"ontaps3_object_count","text":"API Endpoint Metric Template REST api/private/cli/vserver/object-store-server/bucket object_count conf/rest/9.7.0/ontap_s3.yaml"},{"location":"ontap-metrics/#ontaps3_size","title":"ontaps3_size","text":"

      Specifies the bucket size in bytes; ranges from 190MB to 62PB.
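
      Both bucket capacity fields, size and logical_used_size, come from the same REST endpoint and can be fetched in one call. Below is a minimal sketch using Python and the requests library; the cluster address, credentials, and the additional svm.name and name fields are assumptions, not part of this page.

          # Minimal sketch: read bucket capacity fields from the ONTAP REST API.
          # CLUSTER and AUTH are placeholders; verify=False is for lab use only.
          import requests

          CLUSTER = "cluster-mgmt.example.com"   # assumed cluster management LIF
          AUTH = ("ro_user", "secret")           # assumed read-only account

          resp = requests.get(
              f"https://{CLUSTER}/api/protocols/s3/buckets",
              params={"fields": "svm.name,name,size,logical_used_size"},
              auth=AUTH,
              verify=False,
          )
          resp.raise_for_status()

          for bucket in resp.json().get("records", []):
              print(bucket["svm"]["name"], bucket["name"],
                    bucket.get("size"), bucket.get("logical_used_size"))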

      API Endpoint Metric Template REST api/protocols/s3/buckets size conf/rest/9.7.0/ontap_s3.yaml"},{"location":"ontap-metrics/#ontaps3_svm_abort_multipart_upload_failed","title":"ontaps3_svm_abort_multipart_upload_failed","text":"

      Number of failed Abort Multipart Upload operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_abort_multipart_upload_failed_client_close","title":"ontaps3_svm_abort_multipart_upload_failed_client_close","text":"

      Number of times an Abort Multipart Upload operation failed because the client terminated the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_abort_multipart_upload_latency","title":"ontaps3_svm_abort_multipart_upload_latency","text":"

      Average latency for Abort Multipart Upload operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_latencyUnit: microsecType: averageBase: abort_multipart_upload_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_latencyUnit: microsecType: average,no-zero-valuesBase: abort_multipart_upload_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_abort_multipart_upload_rate","title":"ontaps3_svm_abort_multipart_upload_rate","text":"

      Number of Abort Multipart Upload operations per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_abort_multipart_upload_total","title":"ontaps3_svm_abort_multipart_upload_total","text":"

      Number of Abort Multipart Upload operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_allow_access","title":"ontaps3_svm_allow_access","text":"

      Number of times access was allowed.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server allow_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server allow_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_anonymous_access","title":"ontaps3_svm_anonymous_access","text":"

      Number of times anonymous access was allowed.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server anonymous_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server anonymous_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_anonymous_deny_access","title":"ontaps3_svm_anonymous_deny_access","text":"

      Number of times anonymous access was denied.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server anonymous_deny_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server anonymous_deny_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_authentication_failures","title":"ontaps3_svm_authentication_failures","text":"

      Number of authentication failures.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server authentication_failuresUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server authentication_failuresUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_chunked_upload_reqs","title":"ontaps3_svm_chunked_upload_reqs","text":"

      Total number of object store server chunked object upload requests

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server chunked_upload_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server chunked_upload_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_complete_multipart_upload_failed","title":"ontaps3_svm_complete_multipart_upload_failed","text":"

      Number of failed Complete Multipart Upload operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_complete_multipart_upload_failed_client_close","title":"ontaps3_svm_complete_multipart_upload_failed_client_close","text":"

      Number of times a Complete Multipart Upload operation failed because the client terminated the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_complete_multipart_upload_latency","title":"ontaps3_svm_complete_multipart_upload_latency","text":"

      Average latency for Complete Multipart Upload operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_latencyUnit: microsecType: averageBase: complete_multipart_upload_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_latencyUnit: microsecType: average,no-zero-valuesBase: complete_multipart_upload_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_complete_multipart_upload_rate","title":"ontaps3_svm_complete_multipart_upload_rate","text":"

      Number of Complete Multipart Upload operations per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_complete_multipart_upload_total","title":"ontaps3_svm_complete_multipart_upload_total","text":"

      Number of Complete Multipart Upload operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_connected_connections","title":"ontaps3_svm_connected_connections","text":"

      Number of object store server connections currently established
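
      Counters of Type raw, such as connected_connections, are exported as point-in-time gauges rather than per-second rates. A minimal sketch of reading them from a Prometheus-format endpoint follows; the exporter URL, port, and the exported metric-name prefix are assumptions that depend on the local Harvest configuration.

          # Minimal sketch: filter gauge-style counters from a Prometheus-format endpoint.
          # The URL/port and the metric-name substring are assumptions.
          from urllib.request import urlopen

          EXPORTER = "http://localhost:12990/metrics"  # assumed Harvest Prometheus exporter

          with urlopen(EXPORTER) as resp:
              for line in resp.read().decode().splitlines():
                  if line.startswith("#"):
                      continue  # skip HELP/TYPE comment lines
                  if "ontaps3_svm_connected_connections" in line:
                      print(line)  # metric{labels} value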

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server connected_connectionsUnit: noneType: rawBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server connected_connectionsUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_connections","title":"ontaps3_svm_connections","text":"

      Total number of object store server connections.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server connectionsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server connectionsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_create_bucket_failed","title":"ontaps3_svm_create_bucket_failed","text":"

      Number of failed Create Bucket operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_create_bucket_failed_client_close","title":"ontaps3_svm_create_bucket_failed_client_close","text":"

      Number of times a Create Bucket operation failed because the client terminated the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_create_bucket_latency","title":"ontaps3_svm_create_bucket_latency","text":"

      Average latency for Create Bucket operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_latencyUnit: microsecType: averageBase: create_bucket_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_latencyUnit: microsecType: average,no-zero-valuesBase: create_bucket_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_create_bucket_rate","title":"ontaps3_svm_create_bucket_rate","text":"

      Number of Create Bucket operations per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_create_bucket_total","title":"ontaps3_svm_create_bucket_total","text":"

      Number of Create Bucket operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_default_deny_access","title":"ontaps3_svm_default_deny_access","text":"

      Number of times access was denied by default and not through any policy statement.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server default_deny_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server default_deny_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_bucket_failed","title":"ontaps3_svm_delete_bucket_failed","text":"

      Number of failed Delete Bucket operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_bucket_failed_client_close","title":"ontaps3_svm_delete_bucket_failed_client_close","text":"

      Number of times a Delete Bucket operation failed because the client terminated the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_bucket_latency","title":"ontaps3_svm_delete_bucket_latency","text":"

      Average latency for Delete Bucket operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_latencyUnit: microsecType: averageBase: delete_bucket_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_latencyUnit: microsecType: average,no-zero-valuesBase: delete_bucket_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_bucket_rate","title":"ontaps3_svm_delete_bucket_rate","text":"

      Number of Delete Bucket operations per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_bucket_total","title":"ontaps3_svm_delete_bucket_total","text":"

      Number of Delete Bucket operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_failed","title":"ontaps3_svm_delete_object_failed","text":"

      Number of failed DELETE object operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_failed_client_close","title":"ontaps3_svm_delete_object_failed_client_close","text":"

      Number of times a DELETE object operation failed because the client closed the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_latency","title":"ontaps3_svm_delete_object_latency","text":"

      Average latency for DELETE object operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_latencyUnit: microsecType: averageBase: delete_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_latencyUnit: microsecType: average,no-zero-valuesBase: delete_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_rate","title":"ontaps3_svm_delete_object_rate","text":"

      Number of DELETE object operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_tagging_failed","title":"ontaps3_svm_delete_object_tagging_failed","text":"

      Number of failed DELETE object tagging operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_tagging_failed_client_close","title":"ontaps3_svm_delete_object_tagging_failed_client_close","text":"

      Number of times a DELETE object tagging operation failed because the client terminated the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_tagging_latency","title":"ontaps3_svm_delete_object_tagging_latency","text":"

      Average latency for DELETE object tagging operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_latencyUnit: microsecType: averageBase: delete_object_tagging_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_latencyUnit: microsecType: average,no-zero-valuesBase: delete_object_tagging_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_tagging_rate","title":"ontaps3_svm_delete_object_tagging_rate","text":"

      Number of DELETE object tagging operations per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_tagging_total","title":"ontaps3_svm_delete_object_tagging_total","text":"

      Number of DELETE object tagging operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_delete_object_total","title":"ontaps3_svm_delete_object_total","text":"

      Number of DELETE object operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_explicit_deny_access","title":"ontaps3_svm_explicit_deny_access","text":"

      Number of times access was denied explicitly by a policy statement.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server explicit_deny_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server explicit_deny_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_bucket_acl_failed","title":"ontaps3_svm_get_bucket_acl_failed","text":"

      Number of failed GET Bucket ACL operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_bucket_acl_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_bucket_acl_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_bucket_acl_total","title":"ontaps3_svm_get_bucket_acl_total","text":"

      Number of GET Bucket ACL operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_bucket_acl_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_bucket_acl_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_bucket_versioning_failed","title":"ontaps3_svm_get_bucket_versioning_failed","text":"

      Number of failed Get Bucket Versioning operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_bucket_versioning_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_bucket_versioning_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_bucket_versioning_total","title":"ontaps3_svm_get_bucket_versioning_total","text":"

      Number of Get Bucket Versioning operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_bucket_versioning_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_bucket_versioning_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_data","title":"ontaps3_svm_get_data","text":"

      Rate of GET object data transfers per second

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_dataUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_acl_failed","title":"ontaps3_svm_get_object_acl_failed","text":"

      Number of failed GET Object ACL operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_acl_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_acl_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_acl_total","title":"ontaps3_svm_get_object_acl_total","text":"

      Number of GET Object ACL operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_acl_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_acl_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_failed","title":"ontaps3_svm_get_object_failed","text":"

      Number of failed GET object operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_failed_client_close","title":"ontaps3_svm_get_object_failed_client_close","text":"

      Number of times a GET object operation failed because the client closed the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_lastbyte_latency","title":"ontaps3_svm_get_object_lastbyte_latency","text":"

      Average last-byte latency for GET object operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_lastbyte_latencyUnit: microsecType: averageBase: get_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_lastbyte_latencyUnit: microsecType: average,no-zero-valuesBase: get_object_lastbyte_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_latency","title":"ontaps3_svm_get_object_latency","text":"

      Average first-byte latency for GET object operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_latencyUnit: microsecType: averageBase: get_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_latencyUnit: microsecType: average,no-zero-valuesBase: get_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_rate","title":"ontaps3_svm_get_object_rate","text":"

      Number of GET object operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_tagging_failed","title":"ontaps3_svm_get_object_tagging_failed","text":"

      Number of failed GET object tagging operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_tagging_failed_client_close","title":"ontaps3_svm_get_object_tagging_failed_client_close","text":"

      Number of times a GET object tagging operation failed because the client closed the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_tagging_latency","title":"ontaps3_svm_get_object_tagging_latency","text":"

      Average latency for GET object tagging operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_latencyUnit: microsecType: averageBase: get_object_tagging_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_latencyUnit: microsecType: average,no-zero-valuesBase: get_object_tagging_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_tagging_rate","title":"ontaps3_svm_get_object_tagging_rate","text":"

      Number of GET object tagging operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_tagging_total","title":"ontaps3_svm_get_object_tagging_total","text":"

      Number of GET object tagging operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_get_object_total","title":"ontaps3_svm_get_object_total","text":"

      Number of GET object operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_group_policy_evaluated","title":"ontaps3_svm_group_policy_evaluated","text":"

      Number of times group policies were evaluated.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server group_policy_evaluatedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server group_policy_evaluatedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_bucket_failed","title":"ontaps3_svm_head_bucket_failed","text":"

      Number of failed HEAD bucket operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_bucket_failed_client_close","title":"ontaps3_svm_head_bucket_failed_client_close","text":"

      Number of times a HEAD bucket operation failed because the client closed the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_bucket_latency","title":"ontaps3_svm_head_bucket_latency","text":"

      Average latency for HEAD bucket operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_latencyUnit: microsecType: averageBase: head_bucket_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_latencyUnit: microsecType: average,no-zero-valuesBase: head_bucket_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_bucket_rate","title":"ontaps3_svm_head_bucket_rate","text":"

      Number of HEAD bucket operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_bucket_total","title":"ontaps3_svm_head_bucket_total","text":"

      Number of HEAD bucket operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_object_failed","title":"ontaps3_svm_head_object_failed","text":"

      Number of failed HEAD Object operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_object_failed_client_close","title":"ontaps3_svm_head_object_failed_client_close","text":"

      Number of times a HEAD object operation failed because the client closed the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_object_latency","title":"ontaps3_svm_head_object_latency","text":"

      Average latency for HEAD object operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_latencyUnit: microsecType: averageBase: head_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_latencyUnit: microsecType: average,no-zero-valuesBase: head_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_object_rate","title":"ontaps3_svm_head_object_rate","text":"

      Number of HEAD Object operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_head_object_total","title":"ontaps3_svm_head_object_total","text":"

      Number of HEAD Object operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_initiate_multipart_upload_failed","title":"ontaps3_svm_initiate_multipart_upload_failed","text":"

      Number of failed Initiate Multipart Upload operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_initiate_multipart_upload_failed_client_close","title":"ontaps3_svm_initiate_multipart_upload_failed_client_close","text":"

      Number of times an Initiate Multipart Upload operation failed because the client terminated the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_initiate_multipart_upload_latency","title":"ontaps3_svm_initiate_multipart_upload_latency","text":"

      Average latency for Initiate Multipart Upload operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_latencyUnit: microsecType: averageBase: initiate_multipart_upload_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_latencyUnit: microsecType: average,no-zero-valuesBase: initiate_multipart_upload_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_initiate_multipart_upload_rate","title":"ontaps3_svm_initiate_multipart_upload_rate","text":"

      Number of Initiate Multipart Upload operations per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_initiate_multipart_upload_total","title":"ontaps3_svm_initiate_multipart_upload_total","text":"

      Number of Initiate Multipart Upload operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_input_flow_control_entry","title":"ontaps3_svm_input_flow_control_entry","text":"

      Number of times input flow control was entered.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server input_flow_control_entryUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server input_flow_control_entryUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_input_flow_control_exit","title":"ontaps3_svm_input_flow_control_exit","text":"

      Number of times input flow control was exited.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server input_flow_control_exitUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server input_flow_control_exitUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_buckets_failed","title":"ontaps3_svm_list_buckets_failed","text":"

      Number of failed LIST Buckets operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_buckets_failed_client_close","title":"ontaps3_svm_list_buckets_failed_client_close","text":"

      Number of times a LIST Buckets operation failed because the client closed the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_buckets_latency","title":"ontaps3_svm_list_buckets_latency","text":"

      Average latency for LIST Buckets operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_latencyUnit: microsecType: averageBase: list_buckets_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_latencyUnit: microsecType: average,no-zero-valuesBase: head_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_buckets_rate","title":"ontaps3_svm_list_buckets_rate","text":"

      Number of LIST Buckets operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_buckets_total","title":"ontaps3_svm_list_buckets_total","text":"

      Number of LIST Buckets operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_object_versions_failed","title":"ontaps3_svm_list_object_versions_failed","text":"

      Number of failed LIST object versions operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_object_versions_failed_client_close","title":"ontaps3_svm_list_object_versions_failed_client_close","text":"

      Number of times a LIST object versions operation failed because the client closed the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_object_versions_latency","title":"ontaps3_svm_list_object_versions_latency","text":"

      Average latency for LIST Object versions operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_latencyUnit: microsecType: averageBase: list_object_versions_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_latencyUnit: microsecType: average,no-zero-valuesBase: list_object_versions_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_object_versions_rate","title":"ontaps3_svm_list_object_versions_rate","text":"

      Number of LIST Object Versions operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_object_versions_total","title":"ontaps3_svm_list_object_versions_total","text":"

      Number of LIST Object Versions operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_objects_failed","title":"ontaps3_svm_list_objects_failed","text":"

      Number of failed LIST objects operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_objects_failed_client_close","title":"ontaps3_svm_list_objects_failed_client_close","text":"

      Number of times a LIST objects operation failed because the client closed the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_objects_latency","title":"ontaps3_svm_list_objects_latency","text":"

      Average latency for LIST Objects operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_latencyUnit: microsecType: averageBase: list_objects_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_latencyUnit: microsecType: average,no-zero-valuesBase: list_objects_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_objects_rate","title":"ontaps3_svm_list_objects_rate","text":"

      Number of LIST Objects operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_objects_total","title":"ontaps3_svm_list_objects_total","text":"

      Number of LIST Objects operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_uploads_failed","title":"ontaps3_svm_list_uploads_failed","text":"

      Number of failed LIST Upload operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_uploads_failed_client_close","title":"ontaps3_svm_list_uploads_failed_client_close","text":"

      Number of times a LIST Upload operation failed because the client closed the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_uploads_latency","title":"ontaps3_svm_list_uploads_latency","text":"

      Average latency for LIST Upload operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_latencyUnit: microsecType: averageBase: list_uploads_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_latencyUnit: microsecType: average,no-zero-valuesBase: list_uploads_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_uploads_rate","title":"ontaps3_svm_list_uploads_rate","text":"

      Number of LIST Upload operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_list_uploads_total","title":"ontaps3_svm_list_uploads_total","text":"

      Number of LIST Upload operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_max_cmds_per_connection","title":"ontaps3_svm_max_cmds_per_connection","text":"

      Maximum number of commands pipelined at any one time on a connection.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server maximum_commands_per_connectionUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server max_cmds_per_connectionUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_max_connected_connections","title":"ontaps3_svm_max_connected_connections","text":"

      Maximum number of object store server connections established at one time

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server maximum_connected_connectionsUnit: noneType: rawBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server max_connected_connectionsUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_max_requests_outstanding","title":"ontaps3_svm_max_requests_outstanding","text":"

      Maximum number of object store server requests in process at one time

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server maximum_requests_outstandingUnit: noneType: rawBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server max_requests_outstandingUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_multi_delete_reqs","title":"ontaps3_svm_multi_delete_reqs","text":"

      Total number of object store server multiple object delete requests

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server multiple_delete_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server multi_delete_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_output_flow_control_entry","title":"ontaps3_svm_output_flow_control_entry","text":"

      Number of times output flow control was entered.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server output_flow_control_entryUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server output_flow_control_entryUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_output_flow_control_exit","title":"ontaps3_svm_output_flow_control_exit","text":"

      Number of times output flow control was exited.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server output_flow_control_exitUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server output_flow_control_exitUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_presigned_url_reqs","title":"ontaps3_svm_presigned_url_reqs","text":"

      Total number of presigned object store server URL requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server presigned_url_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server presigned_url_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_bucket_versioning_failed","title":"ontaps3_svm_put_bucket_versioning_failed","text":"

      Number of failed Put Bucket Versioning operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_bucket_versioning_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_bucket_versioning_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_bucket_versioning_total","title":"ontaps3_svm_put_bucket_versioning_total","text":"

      Number of Put Bucket Versioning operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_bucket_versioning_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_bucket_versioning_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_data","title":"ontaps3_svm_put_data","text":"

      Rate of PUT object data transfers per second

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_dataUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_failed","title":"ontaps3_svm_put_object_failed","text":"

      Number of failed PUT object operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_failed_client_close","title":"ontaps3_svm_put_object_failed_client_close","text":"

      Number of times a PUT object operation failed because the client closed the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_latency","title":"ontaps3_svm_put_object_latency","text":"

      Average latency for PUT object operations
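
      Latency counters such as this one are Type: average with a Base counter (put_object_total via REST, put_object_latency_base via ZAPI): the exported value is the change in the accumulated latency divided by the change in the base between polls. A minimal sketch with illustrative names:

      # Average-type counter: delta of the latency accumulator divided by the
      # delta of its base counter (the operation count). Names are assumptions.
      def average_counter(lat_prev, lat_curr, base_prev, base_curr):
          ops = base_curr - base_prev
          if ops <= 0:
              return 0.0                 # no operations completed in the interval
          return (lat_curr - lat_prev) / ops   # e.g. microseconds per PUT object op

      print(average_counter(lat_prev=5_000_000, lat_curr=5_900_000,
                            base_prev=1_000, base_curr=1_300))  # 3000.0 microsec per op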

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_latencyUnit: microsecType: averageBase: put_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_latencyUnit: microsecType: average,no-zero-valuesBase: put_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_rate","title":"ontaps3_svm_put_object_rate","text":"

      Number of PUT object operations per second

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_tagging_failed","title":"ontaps3_svm_put_object_tagging_failed","text":"

      Number of failed PUT object tagging operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_tagging_failed_client_close","title":"ontaps3_svm_put_object_tagging_failed_client_close","text":"

      Number of times a PUT object tagging operation failed because the client terminated the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_tagging_latency","title":"ontaps3_svm_put_object_tagging_latency","text":"

      Average latency for PUT object tagging operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_latencyUnit: microsecType: averageBase: put_object_tagging_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_latencyUnit: microsecType: average,no-zero-valuesBase: put_object_tagging_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_tagging_rate","title":"ontaps3_svm_put_object_tagging_rate","text":"

      Number of PUT object tagging operations per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_tagging_total","title":"ontaps3_svm_put_object_tagging_total","text":"

      Number of PUT object tagging operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_put_object_total","title":"ontaps3_svm_put_object_total","text":"

      Number of PUT object operations

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_request_parse_errors","title":"ontaps3_svm_request_parse_errors","text":"

      Number of request parser errors due to malformed requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server request_parse_errorsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server request_parse_errorsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_requests","title":"ontaps3_svm_requests","text":"

      Total number of object store server requests

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server requestsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_requests_outstanding","title":"ontaps3_svm_requests_outstanding","text":"

      Number of object store server requests in process

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server requests_outstandingUnit: noneType: rawBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server requests_outstandingUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_root_user_access","title":"ontaps3_svm_root_user_access","text":"

      Number of times access was performed by the root user.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server root_user_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server root_user_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_server_connection_close","title":"ontaps3_svm_server_connection_close","text":"

      Number of connection closes triggered by server due to fatal errors.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server server_connection_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server server_connection_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_signature_v2_reqs","title":"ontaps3_svm_signature_v2_reqs","text":"

      Total number of object store server signature V2 requests

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server signature_v2_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server signature_v2_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_signature_v4_reqs","title":"ontaps3_svm_signature_v4_reqs","text":"

      Total number of object store server signature V4 requests

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server signature_v4_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server signature_v4_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_tagging","title":"ontaps3_svm_tagging","text":"

      Number of requests with tagging specified.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server taggingUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server taggingUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_upload_part_failed","title":"ontaps3_svm_upload_part_failed","text":"

      Number of failed Upload Part operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_upload_part_failed_client_close","title":"ontaps3_svm_upload_part_failed_client_close","text":"

      Number of times an Upload Part operation failed because the client terminated the connection while the operation was still pending on the server.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_upload_part_latency","title":"ontaps3_svm_upload_part_latency","text":"

      Average latency for Upload Part operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_latencyUnit: microsecType: averageBase: upload_part_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_latencyUnit: microsecType: average,no-zero-valuesBase: upload_part_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_upload_part_rate","title":"ontaps3_svm_upload_part_rate","text":"

      Number of Upload Part operations per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_svm_upload_part_total","title":"ontaps3_svm_upload_part_total","text":"

      Number of Upload Part operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#ontaps3_used_percent","title":"ontaps3_used_percent","text":"

      The used_percent metric is the percentage of a bucket's total capacity that is currently being used.
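
      Since the REST template lists logical_used_size and size, the percentage is a simple ratio of the two; a minimal sketch (the guard for an empty bucket size is an assumption):

      # used_percent = logical_used_size / size * 100, guarding against a zero size.
      def bucket_used_percent(logical_used_size, size):
          if not size:
              return 0.0
          return logical_used_size / size * 100.0

      print(bucket_used_percent(logical_used_size=250 * 1024**3, size=1024**4))  # ~24.4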

      API Endpoint Metric Template REST api/protocols/s3/buckets logical_used_size, size conf/rest/9.7.0/ontap_s3.yaml"},{"location":"ontap-metrics/#path_read_data","title":"path_read_data","text":"

      The average read throughput in kilobytes per second read from the indicated target port by the controller.

      API Endpoint Metric Template REST api/cluster/counter/tables/path read_dataUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path read_dataUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#path_read_iops","title":"path_read_iops","text":"

      The number of I/O read operations sent from the initiator port to the indicated target port.

      API Endpoint Metric Template REST api/cluster/counter/tables/path read_iopsUnit: per_secType: rateBase: conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path read_iopsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#path_read_latency","title":"path_read_latency","text":"

      The average latency of I/O read operations sent from this controller to the indicated target port.

      API Endpoint Metric Template REST api/cluster/counter/tables/path read_latencyUnit: microsecType: averageBase: read_iops conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path read_latencyUnit: microsecType: averageBase: read_iops conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#path_total_data","title":"path_total_data","text":"

      The average throughput in kilobytes per second read and written from/to the indicated target port by the controller.

      API Endpoint Metric Template REST api/cluster/counter/tables/path total_dataUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path total_dataUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#path_total_iops","title":"path_total_iops","text":"

      The number of total read/write I/O operations sent from the initiator port to the indicated target port.

      API Endpoint Metric Template REST api/cluster/counter/tables/path total_iopsUnit: per_secType: rateBase: conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path total_iopsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#path_write_data","title":"path_write_data","text":"

      The average write throughput in kilobytes per second written to the indicated target port by the controller.

      API Endpoint Metric Template REST api/cluster/counter/tables/path write_dataUnit: kb_per_secType: rateBase: conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path write_dataUnit: kb_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#path_write_iops","title":"path_write_iops","text":"

      The number of I/O write operations sent from the initiator port to the indicated target port.

      API Endpoint Metric Template REST api/cluster/counter/tables/path write_iopsUnit: per_secType: rateBase: conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path write_iopsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#path_write_latency","title":"path_write_latency","text":"

      The average latency of I/O write operations sent from this controller to the indicated target port.

      API Endpoint Metric Template REST api/cluster/counter/tables/path write_latencyUnit: microsecType: averageBase: write_iops conf/restperf/9.12.0/path.yaml ZAPI perf-object-get-instances path write_latencyUnit: microsecType: averageBase: write_iops conf/zapiperf/cdot/9.8.0/path.yaml"},{"location":"ontap-metrics/#plex_disk_busy","title":"plex_disk_busy","text":"

      The utilization percent of the disk. plex_disk_busy is disk_busy aggregated by plex.
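
      The plex_* (and raid_*) disk metrics are produced by aggregating the per-disk disk:constituent counters over the disks belonging to each plex. The exact aggregation function comes from the Harvest disk template; the sketch below simply averages a percentage counter per plex label as an illustration (the grouping key and the use of a mean are assumptions):

      from collections import defaultdict

      # Illustrative aggregation of per-disk busy% samples into one value per plex.
      samples = [
          {"plex": "plex0", "disk": "1.0.0", "disk_busy": 12.0},
          {"plex": "plex0", "disk": "1.0.1", "disk_busy": 18.0},
          {"plex": "plex1", "disk": "1.0.2", "disk_busy": 5.0},
      ]

      by_plex = defaultdict(list)
      for s in samples:
          by_plex[s["plex"]].append(s["disk_busy"])

      plex_disk_busy = {plex: sum(v) / len(v) for plex, v in by_plex.items()}
      print(plex_disk_busy)  # {'plex0': 15.0, 'plex1': 5.0}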

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent disk_busy_percentUnit: percentType: percentBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_busyUnit: percentType: percentBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_capacity","title":"plex_disk_capacity","text":"

      Disk capacity in MB. plex_disk_capacity is disk_capacity aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent capacityUnit: mbType: rawBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_capacityUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_cp_read_chain","title":"plex_disk_cp_read_chain","text":"

      Average number of blocks transferred in each consistency point read operation during a CP. plex_disk_cp_read_chain is disk_cp_read_chain aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_chainUnit: noneType: averageBase: cp_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_chainUnit: noneType: averageBase: cp_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_cp_read_latency","title":"plex_disk_cp_read_latency","text":"

      Average latency per block in microseconds for consistency point read operations. plex_disk_cp_read_latency is disk_cp_read_latency aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_cp_reads","title":"plex_disk_cp_reads","text":"

      Number of disk read operations initiated each second for consistency point processing. plex_disk_cp_reads is disk_cp_reads aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_io_pending","title":"plex_disk_io_pending","text":"

      Average number of I/Os issued to the disk for which we have not yet received the response. plex_disk_io_pending is disk_io_pending aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_io_queued","title":"plex_disk_io_queued","text":"

      Number of I/Os queued to the disk but not yet issued. plex_disk_io_queued is disk_io_queued aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_total_data","title":"plex_disk_total_data","text":"

      Total throughput for user operations per second. plex_disk_total_data is disk_total_data aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_total_transfers","title":"plex_disk_total_transfers","text":"

      Total number of disk operations involving data transfer initiated per second. plex_disk_total_transfers is disk_total_transfers aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_transfer_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_transfersUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_read_blocks","title":"plex_disk_user_read_blocks","text":"

      Number of blocks transferred for user read operations per second. plex_disk_user_read_blocks is disk_user_read_blocks aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_read_chain","title":"plex_disk_user_read_chain","text":"

      Average number of blocks transferred in each user read operation. plex_disk_user_read_chain is disk_user_read_chain aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_chainUnit: noneType: averageBase: user_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_chainUnit: noneType: averageBase: user_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_read_latency","title":"plex_disk_user_read_latency","text":"

      Average latency per block in microseconds for user read operations. plex_disk_user_read_latency is disk_user_read_latency aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_reads","title":"plex_disk_user_reads","text":"

      Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. plex_disk_user_reads is disk_user_reads aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_write_blocks","title":"plex_disk_user_write_blocks","text":"

      Number of blocks transferred for user write operations per second. plex_disk_user_write_blocks is disk_user_write_blocks aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_write_chain","title":"plex_disk_user_write_chain","text":"

      Average number of blocks transferred in each user write operation. plex_disk_user_write_chain is disk_user_write_chain aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_chainUnit: noneType: averageBase: user_write_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_chainUnit: noneType: averageBase: user_writes conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_write_latency","title":"plex_disk_user_write_latency","text":"

      Average latency per block in microseconds for user write operations. plex_disk_user_write_latency is disk_user_write_latency aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#plex_disk_user_writes","title":"plex_disk_user_writes","text":"

      Number of disk write operations initiated each second for storing data or metadata associated with user requests. plex_disk_user_writes is disk_user_writes aggregated by plex.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_writesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#qos_concurrency","title":"qos_concurrency","text":"

      This is the average number of concurrent requests for the workload.
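
      As a rough sanity check, concurrency relates to throughput and latency through Little's law: concurrency is roughly ops per second multiplied by latency in seconds. This is a general queueing identity used here for illustration, not a statement of how ONTAP computes the counter:

      # Little's law: average concurrency = arrival rate * average time in system.
      def expected_concurrency(ops_per_sec, latency_microsec):
          return ops_per_sec * (latency_microsec / 1_000_000)

      print(expected_concurrency(ops_per_sec=4000, latency_microsec=500))  # 2.0 concurrent requests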

      API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume concurrencyUnit: noneType: rateBase: conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume concurrencyUnit: noneType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_detail_resource_latency","title":"qos_detail_resource_latency","text":"

      This refers to the average latency for workloads within the subsystems of Data ONTAP. These subsystems are the various modules or components within the system that could contribute to delays or latency during data or task processing. The calculated latency includes both the processing time within the subsystem and the waiting time at that subsystem. Each subsystem's latency is described below; a sketch of combining the per-subsystem values follows the list.

      • frontend: Represents the delays in the network layer of ONTAP.
      • backend: Represents the delays in the data/WAFL layer of ONTAP.
      • cluster: Represents delays caused by the cluster switches, cables, and adapters which physically connect clustered nodes. If the cluster interconnect component is in contention, it means high wait time for I/O requests at the cluster interconnect is impacting the latency of one or more workloads.
      • cp: Represents delays due to buffered write flushes, called consistency points (cp).
      • disk: Represents slowness due to attached hard drives or solid state drives.
      • network: Represents the wait time of I/O requests by the external networking protocols on the cluster (note: these latencies typically apply only to SAN, not NAS). The wait time is time spent waiting for transfer-ready transactions to finish before the cluster can respond to an I/O request. If the network component is in contention, it means high wait time at the protocol layer is impacting the latency of one or more workloads.
      • nvlog: Represents delays due to mirroring writes to the NVRAM/NVLOG memory and to the HA partner NVRAM/NVLOG memory.
      • suspend: Represents delays due to operations suspending on a delay mechanism. Typically this is diagnosed by NetApp Support.
      • throttle: Represents the throughput maximum (ceiling) setting of the storage Quality of Service (QoS) policy group assigned to the workload. If the policy group component is in contention, it means all workloads in the policy group are being throttled by the set throughput limit, which is impacting the latency of one or more of those workloads.
      • qos_min: Represents the latency added to a workload by the QoS throughput floor (expected) setting assigned to other workloads. If the QoS floors set on certain workloads use the majority of the bandwidth to guarantee the promised throughput, other workloads will be throttled and see more latency.
      • cloud: Represents the software component in the cluster involved with I/O processing between the cluster and the cloud tier on which user data is stored. If the cloud latency component is in contention, it means that a large amount of reads from volumes that are hosted on the cloud tier are impacting the latency of one or more workloads.
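
      Each qos_detail_resource_latency sample is reported per subsystem, so a per-workload total can be approximated by summing the subsystem values. The sample shape and label names below are assumptions for illustration, not Harvest's exported schema:

      from collections import defaultdict

      # Illustrative: sum per-subsystem latency samples (microseconds) per workload.
      samples = [
          {"workload": "vol1-wid123", "subsystem": "frontend", "latency_us": 120.0},
          {"workload": "vol1-wid123", "subsystem": "disk",     "latency_us": 800.0},
          {"workload": "vol1-wid123", "subsystem": "suspend",  "latency_us": 40.0},
      ]

      total = defaultdict(float)
      for s in samples:
          total[s["workload"]] += s["latency_us"]

      print(dict(total))  # {'vol1-wid123': 960.0}, an approximate overall latency per workload
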
      API Endpoint Metric Template REST api/cluster/counter/tables/qos_detail Harvest generatedUnit: microsecondsType: averageBase: ops conf/restperf/9.12.0/workload_detail.yaml ZAPI perf-object-get-instances workload_detail Harvest generatedUnit: microsecondsType: averageBase: ops conf/zapiperf/9.12.0/workload_detail.yaml"},{"location":"ontap-metrics/#qos_detail_service_time_latency","title":"qos_detail_service_time_latency","text":"

      This refers to the average service time for workloads within the subsystems of Data ONTAP. These subsystems are the various modules or components within the system that could contribute to delays or latency during data or task processing. This latency is the processing time within the subsystem.

      API Endpoint Metric Template REST api/cluster/counter/tables/qos_detail Harvest generatedUnit: microsecondsType: averageBase: ops conf/restperf/9.12.0/workload_detail.yaml ZAPI perf-object-get-instances workload_detail Harvest generatedUnit: microsecondsType: averageBase: ops conf/zapiperf/9.12.0/workload_detail.yaml"},{"location":"ontap-metrics/#qos_latency","title":"qos_latency","text":"

      This is the average response time for requests that were initiated by the workload.

      API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume latencyUnit: microsecType: averageBase: ops conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume latencyUnit: microsecType: average,no-zero-valuesBase: ops conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_ops","title":"qos_ops","text":"

      This field is the workload's rate of operations that completed during the measurement interval; measured per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume opsUnit: per_secType: rateBase: conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_other_ops","title":"qos_other_ops","text":"

      This is the rate of this workload's other operations that completed during the measurement interval.

      API Endpoint Metric Template REST api/cluster/counter/tables/qos other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/workload.yaml ZAPI perf-object-get-instances workload_volume other_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_read_data","title":"qos_read_data","text":"

      This is the amount of data read per second from the filer by the workload.

      API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume read_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume read_dataUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_read_io_type","title":"qos_read_io_type","text":"

      This is the percentage of read requests served from various components (such as buffer cache, ext_cache, disk, etc.).

      API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume read_io_type_percentUnit: percentType: percentBase: read_io_type_base conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume read_io_typeUnit: percentType: percentBase: read_io_type_base conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_read_latency","title":"qos_read_latency","text":"

      This is the average response time for read requests that were initiated by the workload.

      API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume read_latencyUnit: microsecType: average,no-zero-valuesBase: read_ops conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_read_ops","title":"qos_read_ops","text":"

      This is the rate of this workload's read operations that completed during the measurement interval.

      API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume read_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_sequential_reads","title":"qos_sequential_reads","text":"

      This is the percentage of reads, performed on behalf of the workload, that were sequential.

      API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume sequential_reads_percentUnit: percentType: percentBase: sequential_reads_base conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume sequential_readsUnit: percentType: percent,no-zero-valuesBase: sequential_reads_base conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_sequential_writes","title":"qos_sequential_writes","text":"

      This is the percentage of writes, performed on behalf of the workload, that were sequential. This counter is only available on platforms with more than 4GB of NVRAM.

      API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume sequential_writes_percentUnit: percentType: percentBase: sequential_writes_base conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume sequential_writesUnit: percentType: percent,no-zero-valuesBase: sequential_writes_base conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_total_data","title":"qos_total_data","text":"

      This is the total amount of data read/written per second from/to the filer by the workload.

      API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume total_dataUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_write_data","title":"qos_write_data","text":"

      This is the amount of data written per second to the filer by the workload.

      API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume write_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume write_dataUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_write_latency","title":"qos_write_latency","text":"

      This is the average response time for write requests that were initiated by the workload.

      API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume write_latencyUnit: microsecType: average,no-zero-valuesBase: write_ops conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qos_write_ops","title":"qos_write_ops","text":"

      This is the workload's rate of write operations that completed during the measurement interval; measured per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/qos_volume write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/workload_volume.yaml ZAPI perf-object-get-instances workload_volume write_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/workload_volume.yaml"},{"location":"ontap-metrics/#qtree_cifs_ops","title":"qtree_cifs_ops","text":"

      Number of CIFS operations per second to the qtree

      API Endpoint Metric Template REST api/cluster/counter/tables/qtree cifs_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/qtree.yaml ZAPI perf-object-get-instances qtree cifs_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#qtree_id","title":"qtree_id","text":"

      The identifier for the qtree, unique within the qtree's volume.

      API Endpoint Metric Template REST api/storage/qtrees id conf/rest/9.12.0/qtree.yaml"},{"location":"ontap-metrics/#qtree_internal_ops","title":"qtree_internal_ops","text":"

      Number of internal operations per second to the qtree generated by activities such as SnapMirror and backup

      API Endpoint Metric Template REST api/cluster/counter/tables/qtree internal_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/qtree.yaml ZAPI perf-object-get-instances qtree internal_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#qtree_nfs_ops","title":"qtree_nfs_ops","text":"

      Number of NFS operations per second to the qtree

      API Endpoint Metric Template REST api/cluster/counter/tables/qtree nfs_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/qtree.yaml ZAPI perf-object-get-instances qtree nfs_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#qtree_total_ops","title":"qtree_total_ops","text":"

      Summation of NFS ops, CIFS ops, CSS ops and internal ops
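
      The total is a plain sum of the per-protocol op rates listed above; a trivial sketch (variable names are illustrative):

      # qtree_total_ops as the sum of the per-protocol op rates (all in per_sec).
      def qtree_total_ops(nfs_ops, cifs_ops, css_ops, internal_ops):
          return nfs_ops + cifs_ops + css_ops + internal_ops

      print(qtree_total_ops(nfs_ops=1200, cifs_ops=300, css_ops=0, internal_ops=25))  # 1525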

      API Endpoint Metric Template REST api/cluster/counter/tables/qtree total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/qtree.yaml ZAPI perf-object-get-instances qtree total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_disk_limit","title":"quota_disk_limit","text":"

      Maximum amount of disk space, in kilobytes, allowed for the quota target (hard disk space limit). The value is -1 if the limit is unlimited.

      API Endpoint Metric Template REST api/storage/quota/reports space.hard_limit conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter disk-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_disk_used","title":"quota_disk_used","text":"

      Current amount of disk space, in kilobytes, used by the quota target.

      API Endpoint Metric Template REST api/storage/quota/reports space.used.total conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter disk-used conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_disk_used_pct_disk_limit","title":"quota_disk_used_pct_disk_limit","text":"

      Current disk space used expressed as a percentage of hard disk limit.
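
      Because quota_disk_limit reports -1 when the hard limit is unlimited, any percentage derived from it should skip that sentinel; a minimal sketch (returning None for unlimited quotas is an assumption):

      # Percent of the hard disk limit used, in kilobytes, treating -1 as unlimited.
      def disk_used_pct_disk_limit(disk_used_kb, disk_limit_kb):
          if disk_limit_kb in (-1, 0):
              return None                # unlimited (or unset) hard limit: no percentage
          return disk_used_kb / disk_limit_kb * 100.0

      print(disk_used_pct_disk_limit(disk_used_kb=750_000, disk_limit_kb=1_000_000))  # 75.0
      print(disk_used_pct_disk_limit(disk_used_kb=750_000, disk_limit_kb=-1))         # None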

      API Endpoint Metric Template REST api/storage/quota/reports space.used.hard_limit_percent conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter disk-used-pct-disk-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_disk_used_pct_soft_disk_limit","title":"quota_disk_used_pct_soft_disk_limit","text":"

      Current disk space used expressed as a percentage of soft disk limit.

      API Endpoint Metric Template REST api/storage/quota/reports space.used.soft_limit_percent conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter disk-used-pct-soft-disk-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_disk_used_pct_threshold","title":"quota_disk_used_pct_threshold","text":"

      Current disk space used expressed as a percentage of threshold.

      API Endpoint Metric Template ZAPI quota-report-iter disk-used-pct-threshold conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_file_limit","title":"quota_file_limit","text":"

      Maximum number of files allowed for the quota target (hard files limit). The value is -1 if the limit is unlimited.

      API Endpoint Metric Template REST api/storage/quota/reports files.hard_limit conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter file-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_files_used","title":"quota_files_used","text":"

      Current number of files used by the quota target.

      API Endpoint Metric Template REST api/storage/quota/reports files.used.total conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter files-used conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_files_used_pct_file_limit","title":"quota_files_used_pct_file_limit","text":"

      Current number of files used expressed as a percentage of hard file limit.

      API Endpoint Metric Template REST api/storage/quota/reports files.used.hard_limit_percent conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter files-used-pct-file-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_files_used_pct_soft_file_limit","title":"quota_files_used_pct_soft_file_limit","text":"

      Current number of files used expressed as a percentage of soft file limit.

      API Endpoint Metric Template REST api/storage/quota/reports files.used.soft_limit_percent conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter files-used-pct-soft-file-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_soft_disk_limit","title":"quota_soft_disk_limit","text":"

      Soft disk space limit, in kilobytes, for the quota target. The value is -1 if the limit is unlimited.

      API Endpoint Metric Template REST api/storage/quota/reports space.soft_limit conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter soft-disk-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_soft_file_limit","title":"quota_soft_file_limit","text":"

      Soft file limit, in number of files, for the quota target. The value is -1 if the limit is unlimited.

      API Endpoint Metric Template REST api/storage/quota/reports files.soft_limit conf/rest/9.12.0/qtree.yaml ZAPI quota-report-iter soft-file-limit conf/zapi/cdot/9.8.0/qtree.yaml"},{"location":"ontap-metrics/#quota_threshold","title":"quota_threshold","text":"

      Disk space threshold, in kilobytes, for the quota target. The value is -1 if the limit is unlimited.

      API Endpoint Metric Template ZAPI quota-report-iter threshold conf/zapi/cdot/9.8.0/qtree.yaml REST NA Harvest generated conf/rest/9.12.0/qtree.yaml"},{"location":"ontap-metrics/#raid_disk_busy","title":"raid_disk_busy","text":"

      The utilization percent of the disk. raid_disk_busy is disk_busy aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent disk_busy_percentUnit: percentType: percentBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_busyUnit: percentType: percentBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_capacity","title":"raid_disk_capacity","text":"

      Disk capacity in MB. raid_disk_capacity is disk_capacity aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent capacityUnit: mbType: rawBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent disk_capacityUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_cp_read_chain","title":"raid_disk_cp_read_chain","text":"

      Average number of blocks transferred in each consistency point read operation during a CP. raid_disk_cp_read_chain is disk_cp_read_chain aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_chainUnit: noneType: averageBase: cp_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_chainUnit: noneType: averageBase: cp_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_cp_read_latency","title":"raid_disk_cp_read_latency","text":"

      Average latency per block in microseconds for consistency point read operations. raid_disk_cp_read_latency is disk_cp_read_latency aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_read_latencyUnit: microsecType: averageBase: cp_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_cp_reads","title":"raid_disk_cp_reads","text":"

      Number of disk read operations initiated each second for consistency point processing. raid_disk_cp_reads is disk_cp_reads aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent cp_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent cp_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_io_pending","title":"raid_disk_io_pending","text":"

      Average number of I/Os issued to the disk for which we have not yet received the response. raid_disk_io_pending is disk_io_pending aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_pendingUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_io_queued","title":"raid_disk_io_queued","text":"

      Number of I/Os queued to the disk but not yet issued. raid_disk_io_queued is disk_io_queued aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent io_queuedUnit: noneType: averageBase: base_for_disk_busy conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_total_data","title":"raid_disk_total_data","text":"

      Total throughput for user operations per second. raid_disk_total_data is disk_total_data aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_dataUnit: b_per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_total_transfers","title":"raid_disk_total_transfers","text":"

      Total number of disk operations involving data transfer initiated per second. raid_disk_total_transfers is disk_total_transfers aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent total_transfer_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent total_transfersUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_read_blocks","title":"raid_disk_user_read_blocks","text":"

      Number of blocks transferred for user read operations per second. raid_disk_user_read_blocks is disk_user_read_blocks aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_read_chain","title":"raid_disk_user_read_chain","text":"

      Average number of blocks transferred in each user read operation. raid_disk_user_read_chain is disk_user_read_chain aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_chainUnit: noneType: averageBase: user_read_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_chainUnit: noneType: averageBase: user_reads conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_read_latency","title":"raid_disk_user_read_latency","text":"

      Average latency per block in microseconds for user read operations. raid_disk_user_read_latency is disk_user_read_latency aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_read_latencyUnit: microsecType: averageBase: user_read_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_reads","title":"raid_disk_user_reads","text":"

      Number of disk read operations initiated each second for retrieving data or metadata associated with user requests. raid_disk_user_reads is disk_user_reads aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_read_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_readsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_write_blocks","title":"raid_disk_user_write_blocks","text":"

      Number of blocks transferred for user write operations per second. raid_disk_user_write_blocks is disk_user_write_blocks aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_block_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_blocksUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_write_chain","title":"raid_disk_user_write_chain","text":"

      Average number of blocks transferred in each user write operation. raid_disk_user_write_chain is disk_user_write_chain aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_chainUnit: noneType: averageBase: user_write_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_chainUnit: noneType: averageBase: user_writes conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_write_latency","title":"raid_disk_user_write_latency","text":"

      Average latency per block in microseconds for user write operations. raid_disk_user_write_latency is disk_user_write_latency aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_block_count conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_write_latencyUnit: microsecType: averageBase: user_write_blocks conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#raid_disk_user_writes","title":"raid_disk_user_writes","text":"

      Number of disk write operations initiated each second for storing data or metadata associated with user requests. raid_disk_user_writes is disk_user_writes aggregated by raid.

      API Endpoint Metric Template REST api/cluster/counter/tables/disk:constituent user_write_countUnit: per_secType: rateBase: conf/restperf/9.12.0/disk.yaml ZAPI perf-object-get-instances disk:constituent user_writesUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#rw_ctx_cifs_giveups","title":"rw_ctx_cifs_giveups","text":"

      Array of the number of give-ups of CIFS ops that rewound more than a certain threshold, categorized by rewind reason.

      API Endpoint Metric Template ZAPI perf-object-get-instances rw_ctx cifs_giveupsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/rwctx.yaml"},{"location":"ontap-metrics/#rw_ctx_cifs_rewinds","title":"rw_ctx_cifs_rewinds","text":"

      Array of the number of rewinds for CIFS ops, categorized by rewind reason.

      API Endpoint Metric Template ZAPI perf-object-get-instances rw_ctx cifs_rewindsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/rwctx.yaml"},{"location":"ontap-metrics/#rw_ctx_nfs_giveups","title":"rw_ctx_nfs_giveups","text":"

      Array of the number of give-ups of NFS ops that rewound more than a certain threshold, categorized by rewind reason.

      API Endpoint Metric Template ZAPI perf-object-get-instances rw_ctx nfs_giveupsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/rwctx.yaml"},{"location":"ontap-metrics/#rw_ctx_nfs_rewinds","title":"rw_ctx_nfs_rewinds","text":"

      Array of the number of rewinds for NFS ops, categorized by rewind reason.

      API Endpoint Metric Template ZAPI perf-object-get-instances rw_ctx nfs_rewindsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/rwctx.yaml"},{"location":"ontap-metrics/#rw_ctx_qos_flowcontrol","title":"rw_ctx_qos_flowcontrol","text":"

      The number of times QoS limiting has enabled stream flowcontrol.

      API Endpoint Metric Template ZAPI perf-object-get-instances rw_ctx qos_flowcontrolUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/rwctx.yaml"},{"location":"ontap-metrics/#rw_ctx_qos_rewinds","title":"rw_ctx_qos_rewinds","text":"

      The number of restarts after a rewind because of QoS limiting.

      API Endpoint Metric Template ZAPI perf-object-get-instances rw_ctx qos_rewindsUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/rwctx.yaml"},{"location":"ontap-metrics/#security_audit_destination_port","title":"security_audit_destination_port","text":"

      The destination port used to forward the message.

      API Endpoint Metric Template ZAPI cluster-log-forward-get-iter cluster-log-forward-info.port conf/zapi/cdot/9.8.0/security_audit_dest.yaml"},{"location":"ontap-metrics/#security_certificate_expiry_time","title":"security_certificate_expiry_time","text":"API Endpoint Metric Template REST api/private/cli/security/certificate expiration conf/rest/9.12.0/security_certificate.yaml ZAPI security-certificate-get-iter certificate-info.expiration-date conf/zapi/cdot/9.8.0/security_certificate.yaml"},{"location":"ontap-metrics/#security_ssh_max_instances","title":"security_ssh_max_instances","text":"

      Maximum possible simultaneous connections.

      API Endpoint Metric Template REST api/security/ssh max_instances conf/rest/9.12.0/security_ssh.yaml"},{"location":"ontap-metrics/#shelf_average_ambient_temperature","title":"shelf_average_ambient_temperature","text":"
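As an example of pulling one of these REST fields directly, the sketch below queries the `api/security/ssh` endpoint listed above for `max_instances` using Python's `requests` library. The cluster address, credentials, and the `fields` query parameter are illustrative assumptions; this is not how Harvest itself collects the counter.

```python
import requests

# Hypothetical cluster address and credentials; adjust for your environment.
CLUSTER = "https://cluster.example.com"
AUTH = ("admin", "password")

# ONTAP REST endpoint listed above for security_ssh_max_instances.
resp = requests.get(
    f"{CLUSTER}/api/security/ssh",
    params={"fields": "max_instances"},  # assumed field-selection parameter
    auth=AUTH,
    verify=False,  # lab-only: skip TLS verification
)
resp.raise_for_status()
print(resp.json().get("max_instances"))
```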

      Average temperature of all ambient sensors for shelf in Celsius.

      API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_average_fan_speed","title":"shelf_average_fan_speed","text":"
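Because this metric is marked "Harvest generated" rather than read from a single counter, the sketch below shows the kind of derivation involved: averaging the Celsius readings of a shelf's ambient sensors while excluding non-ambient ones. The sensor list and field names are hypothetical and only illustrate the averaging, not Harvest's code.

```python
def average_ambient_temperature(sensors):
    """Average the Celsius readings of ambient sensors on one shelf.

    sensors: iterable of dicts with hypothetical keys
             'ambient' (bool) and 'temperature' (Celsius).
    """
    readings = [s["temperature"] for s in sensors if s["ambient"]]
    return sum(readings) / len(readings) if readings else None

shelf_sensors = [
    {"ambient": True, "temperature": 24.0},
    {"ambient": True, "temperature": 26.0},
    {"ambient": False, "temperature": 38.0},  # non-ambient, excluded
]
print(average_ambient_temperature(shelf_sensors))  # 25.0
```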

      Average fan speed for shelf in rpm.

      API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_average_temperature","title":"shelf_average_temperature","text":"

      Average temperature of all non-ambient sensors for shelf in Celsius.

      API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_disk_count","title":"shelf_disk_count","text":"

      Disk count in a shelf.

      API Endpoint Metric Template REST api/storage/shelves disk_count conf/rest/9.12.0/shelf.yaml ZAPI storage-shelf-info-get-iter storage-shelf-info.disk-count conf/zapi/cdot/9.8.0/shelf.yaml"},{"location":"ontap-metrics/#shelf_max_fan_speed","title":"shelf_max_fan_speed","text":"

      Maximum fan speed for shelf in rpm.

      API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_max_temperature","title":"shelf_max_temperature","text":"

      Maximum temperature of all non-ambient sensors for shelf in Celsius.

      API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_min_ambient_temperature","title":"shelf_min_ambient_temperature","text":"

      Minimum temperature of all ambient sensors for shelf in Celsius.

      API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_min_fan_speed","title":"shelf_min_fan_speed","text":"

      Minimum fan speed for shelf in rpm.

      API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_min_temperature","title":"shelf_min_temperature","text":"

      Minimum temperature of all non-ambient sensors for shelf in Celsius.

      API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#shelf_power","title":"shelf_power","text":"

      Power consumed by shelf in Watts.

      API Endpoint Metric Template REST NA Harvest generatedUnit: Type: Base: conf/restperf/9.12.0/disk.yaml ZAPI NA Harvest generatedUnit: Type: Base: conf/zapiperf/cdot/9.8.0/disk.yaml"},{"location":"ontap-metrics/#smb2_close_latency","title":"smb2_close_latency","text":"

      Average latency for SMB2_COM_CLOSE operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 close_latencyUnit: microsecType: averageBase: close_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 close_latencyUnit: microsecType: averageBase: close_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_close_latency_histogram","title":"smb2_close_latency_histogram","text":"
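Counters of Type: average, such as this one, are paired with a base counter (here `close_ops` for REST and `close_latency_base` for ZAPI); the usual way to turn two successive raw samples into an average latency is delta(latency) / delta(base). The sketch below shows that arithmetic on hypothetical sample values; it is a sketch of the counter semantics, not Harvest's implementation.

```python
def average_from_samples(prev, curr):
    """Compute an average-type counter from two cumulative raw samples.

    prev/curr: dicts with cumulative 'latency' (microsec) and 'base' (ops).
    Returns average latency in microseconds per op, or None if no new ops.
    """
    d_latency = curr["latency"] - prev["latency"]
    d_base = curr["base"] - prev["base"]
    return d_latency / d_base if d_base > 0 else None

# Hypothetical cumulative samples taken one poll apart.
prev = {"latency": 1_000_000, "base": 5_000}
curr = {"latency": 1_090_000, "base": 5_300}
print(average_from_samples(prev, curr))  # 300.0 microsec per SMB2 close
```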

      Latency histogram for SMB2_COM_CLOSE operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 close_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 close_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_close_ops","title":"smb2_close_ops","text":"

      Number of SMB2_COM_CLOSE operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 close_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 close_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_create_latency","title":"smb2_create_latency","text":"

      Average latency for SMB2_COM_CREATE operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 create_latencyUnit: microsecType: averageBase: create_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 create_latencyUnit: microsecType: averageBase: create_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_create_latency_histogram","title":"smb2_create_latency_histogram","text":"

      Latency histogram for SMB2_COM_CREATE operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 create_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 create_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_create_ops","title":"smb2_create_ops","text":"

      Number of SMB2_COM_CREATE operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 create_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 create_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_lock_latency","title":"smb2_lock_latency","text":"

      Average latency for SMB2_COM_LOCK operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 lock_latencyUnit: microsecType: averageBase: lock_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 lock_latencyUnit: microsecType: averageBase: lock_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_lock_latency_histogram","title":"smb2_lock_latency_histogram","text":"

      Latency histogram for SMB2_COM_LOCK operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 lock_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 lock_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_lock_ops","title":"smb2_lock_ops","text":"

      Number of SMB2_COM_LOCK operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 lock_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 lock_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_negotiate_latency","title":"smb2_negotiate_latency","text":"

      Average latency for SMB2_COM_NEGOTIATE operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 negotiate_latencyUnit: microsecType: averageBase: negotiate_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 negotiate_latencyUnit: microsecType: averageBase: negotiate_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_negotiate_ops","title":"smb2_negotiate_ops","text":"

      Number of SMB2_COM_NEGOTIATE operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 negotiate_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 negotiate_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_oplock_break_latency","title":"smb2_oplock_break_latency","text":"

      Average latency for SMB2_COM_OPLOCK_BREAK operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 oplock_break_latencyUnit: microsecType: averageBase: oplock_break_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 oplock_break_latencyUnit: microsecType: averageBase: oplock_break_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_oplock_break_latency_histogram","title":"smb2_oplock_break_latency_histogram","text":"

      Latency histogram for SMB2_COM_OPLOCK_BREAK operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 oplock_break_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 oplock_break_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_oplock_break_ops","title":"smb2_oplock_break_ops","text":"

      Number of SMB2_COM_OPLOCK_BREAK operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 oplock_break_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 oplock_break_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_query_directory_latency","title":"smb2_query_directory_latency","text":"

      Average latency for SMB2_COM_QUERY_DIRECTORY operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 query_directory_latencyUnit: microsecType: averageBase: query_directory_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 query_directory_latencyUnit: microsecType: averageBase: query_directory_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_query_directory_latency_histogram","title":"smb2_query_directory_latency_histogram","text":"

      Latency histogram for SMB2_COM_QUERY_DIRECTORY operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 query_directory_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 query_directory_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_query_directory_ops","title":"smb2_query_directory_ops","text":"

      Number of SMB2_COM_QUERY_DIRECTORY operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 query_directory_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 query_directory_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_query_info_latency","title":"smb2_query_info_latency","text":"

      Average latency for SMB2_COM_QUERY_INFO operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 query_info_latencyUnit: microsecType: averageBase: query_info_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 query_info_latencyUnit: microsecType: averageBase: query_info_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_query_info_latency_histogram","title":"smb2_query_info_latency_histogram","text":"

      Latency histogram for SMB2_COM_QUERY_INFO operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 query_info_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 query_info_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_query_info_ops","title":"smb2_query_info_ops","text":"

      Number of SMB2_COM_QUERY_INFO operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 query_info_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 query_info_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_read_latency","title":"smb2_read_latency","text":"

      Average latency for SMB2_COM_READ operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 read_latencyUnit: microsecType: averageBase: read_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_read_ops","title":"smb2_read_ops","text":"

      Number of SMB2_COM_READ operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 read_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_session_setup_latency","title":"smb2_session_setup_latency","text":"

      Average latency for SMB2_COM_SESSION_SETUP operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 session_setup_latencyUnit: microsecType: averageBase: session_setup_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 session_setup_latencyUnit: microsecType: averageBase: session_setup_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_session_setup_latency_histogram","title":"smb2_session_setup_latency_histogram","text":"

      Latency histogram for SMB2_COM_SESSION_SETUP operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 session_setup_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 session_setup_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_session_setup_ops","title":"smb2_session_setup_ops","text":"

      Number of SMB2_COM_SESSION_SETUP operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 session_setup_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 session_setup_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_set_info_latency","title":"smb2_set_info_latency","text":"

      Average latency for SMB2_COM_SET_INFO operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 set_info_latencyUnit: microsecType: averageBase: set_info_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 set_info_latencyUnit: microsecType: averageBase: set_info_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_set_info_latency_histogram","title":"smb2_set_info_latency_histogram","text":"

      Latency histogram for SMB2_COM_SET_INFO operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 set_info_latency_histogramUnit: noneType: deltaBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 set_info_latency_histogramUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_set_info_ops","title":"smb2_set_info_ops","text":"

      Number of SMB2_COM_SET_INFO operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 set_info_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 set_info_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_tree_connect_latency","title":"smb2_tree_connect_latency","text":"

      Average latency for SMB2_COM_TREE_CONNECT operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 tree_connect_latencyUnit: microsecType: averageBase: tree_connect_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 tree_connect_latencyUnit: microsecType: averageBase: tree_connect_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_tree_connect_ops","title":"smb2_tree_connect_ops","text":"

      Number of SMB2_COM_TREE_CONNECT operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 tree_connect_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 tree_connect_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_write_latency","title":"smb2_write_latency","text":"

      Average latency for SMB2_COM_WRITE operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 write_latencyUnit: microsecType: averageBase: write_ops conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 write_latencyUnit: microsecType: averageBase: write_latency_base conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#smb2_write_ops","title":"smb2_write_ops","text":"

      Number of SMB2_COM_WRITE operations

      API Endpoint Metric Template REST api/cluster/counter/tables/smb2 write_opsUnit: per_secType: rateBase: conf/restperf/9.14.1/smb2.yaml ZAPI perf-object-get-instances smb2 write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/smb2.yaml"},{"location":"ontap-metrics/#snapmirror_break_failed_count","title":"snapmirror_break_failed_count","text":"

      The number of failed SnapMirror break operations for the relationship

      API Endpoint Metric Template REST api/private/cli/snapmirror break_failed_count conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.break-failed-count conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_break_successful_count","title":"snapmirror_break_successful_count","text":"

      The number of successful SnapMirror break operations for the relationship

      API Endpoint Metric Template REST api/private/cli/snapmirror break_successful_count conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.break-successful-count conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_lag_time","title":"snapmirror_lag_time","text":"

      Amount of time since the last snapmirror transfer in seconds

      API Endpoint Metric Template REST api/private/cli/snapmirror lag_time conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.lag-time conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_last_transfer_duration","title":"snapmirror_last_transfer_duration","text":"
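Since lag_time is exported in seconds, a simple consumer-side check is easy to sketch: flag relationships whose lag exceeds a threshold. The relationship names, lag values, and threshold below are hypothetical and only illustrate how the unit can be used.

```python
LAG_THRESHOLD_SECONDS = 4 * 3600  # hypothetical 4-hour objective

def stale_relationships(relationships, threshold=LAG_THRESHOLD_SECONDS):
    """Return names of relationships whose snapmirror_lag_time exceeds threshold."""
    return [r["name"] for r in relationships if r["lag_time"] > threshold]

# Hypothetical lag_time values, in seconds as exported by this metric.
rels = [
    {"name": "svm1:vol1->dr:vol1", "lag_time": 900},
    {"name": "svm1:vol2->dr:vol2", "lag_time": 21_600},
]
print(stale_relationships(rels))  # ['svm1:vol2->dr:vol2']
```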

      Duration of the last SnapMirror transfer in seconds

      API Endpoint Metric Template REST api/private/cli/snapmirror last_transfer_duration conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.last-transfer-duration conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_last_transfer_end_timestamp","title":"snapmirror_last_transfer_end_timestamp","text":"

      The timestamp of the end of the last transfer

      API Endpoint Metric Template REST api/private/cli/snapmirror last_transfer_end_timestamp conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.last-transfer-end-timestamp conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_last_transfer_size","title":"snapmirror_last_transfer_size","text":"

      Size in kilobytes (1024 bytes) of the last transfer

      API Endpoint Metric Template REST api/private/cli/snapmirror last_transfer_size conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.last-transfer-size conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_newest_snapshot_timestamp","title":"snapmirror_newest_snapshot_timestamp","text":"

      The timestamp of the newest Snapshot copy on the destination volume

      API Endpoint Metric Template REST api/private/cli/snapmirror newest_snapshot_timestamp conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.newest-snapshot-timestamp conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_resync_failed_count","title":"snapmirror_resync_failed_count","text":"

      The number of failed SnapMirror resync operations for the relationship

      API Endpoint Metric Template REST api/private/cli/snapmirror resync_failed_count conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.resync-failed-count conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_resync_successful_count","title":"snapmirror_resync_successful_count","text":"

      The number of successful SnapMirror resync operations for the relationship

      API Endpoint Metric Template REST api/private/cli/snapmirror resync_successful_count conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.resync-successful-count conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_total_transfer_bytes","title":"snapmirror_total_transfer_bytes","text":"

      Cumulative bytes transferred for the relationship

      API Endpoint Metric Template REST api/private/cli/snapmirror total_transfer_bytes conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.total-transfer-bytes conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_total_transfer_time_secs","title":"snapmirror_total_transfer_time_secs","text":"

      Cumulative total transfer time in seconds for the relationship

      API Endpoint Metric Template REST api/private/cli/snapmirror total_transfer_time_secs conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.total-transfer-time-secs conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_update_failed_count","title":"snapmirror_update_failed_count","text":"

      The number of failed SnapMirror update operations for the relationship

      API Endpoint Metric Template REST api/private/cli/snapmirror update_failed_count conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.update-failed-count conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapmirror_update_successful_count","title":"snapmirror_update_successful_count","text":"

      The number of successful SnapMirror update operations for the relationship

      API Endpoint Metric Template REST api/private/cli/snapmirror update_successful_count conf/rest/9.12.0/snapmirror.yaml ZAPI snapmirror-get-iter snapmirror-info.update-successful-count conf/zapi/cdot/9.8.0/snapmirror.yaml"},{"location":"ontap-metrics/#snapshot_policy_total_schedules","title":"snapshot_policy_total_schedules","text":"

      Total number of schedules in this policy

      API Endpoint Metric Template REST api/private/cli/snapshot/policy total_schedules conf/rest/9.12.0/snapshotpolicy.yaml ZAPI snapshot-policy-get-iter snapshot-policy-info.total-schedules conf/zapi/cdot/9.8.0/snapshotpolicy.yaml"},{"location":"ontap-metrics/#svm_cifs_connections","title":"svm_cifs_connections","text":"

      Number of connections

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs connectionsUnit: noneType: rawBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver connectionsUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_established_sessions","title":"svm_cifs_established_sessions","text":"

      Number of established SMB and SMB2 sessions

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs established_sessionsUnit: noneType: rawBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver established_sessionsUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_latency","title":"svm_cifs_latency","text":"

      Average latency for CIFS operations

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs latencyUnit: microsecType: averageBase: latency_base conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver cifs_latencyUnit: microsecType: averageBase: cifs_latency_base conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_op_count","title":"svm_cifs_op_count","text":"

      Array of select CIFS operation counts

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs op_countUnit: noneType: rateBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver cifs_op_countUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_open_files","title":"svm_cifs_open_files","text":"

      Number of open files over SMB and SMB2

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs open_filesUnit: noneType: rawBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver open_filesUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_ops","title":"svm_cifs_ops","text":"

      Total number of CIFS operations

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver cifs_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_read_latency","title":"svm_cifs_read_latency","text":"
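Counters of Type: rate with Unit: per_sec, like this one, are derived from a cumulative raw counter by dividing the change in the counter by the elapsed time between polls. A small sketch of that calculation on hypothetical samples; the poll interval and values are made up for illustration.

```python
def rate_per_second(prev_value, curr_value, elapsed_seconds):
    """Turn two cumulative counter samples into an ops/sec rate."""
    if elapsed_seconds <= 0:
        raise ValueError("elapsed_seconds must be positive")
    return (curr_value - prev_value) / elapsed_seconds

# Hypothetical cumulative CIFS op counts polled 60 seconds apart.
print(rate_per_second(1_200_000, 1_236_000, 60))  # 600.0 ops/sec
```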

      Average latency for CIFS read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs average_read_latencyUnit: microsecType: averageBase: total_read_ops conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver cifs_read_latencyUnit: microsecType: averageBase: cifs_read_ops conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_read_ops","title":"svm_cifs_read_ops","text":"

      Total number of CIFS read operations

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs total_read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver cifs_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_signed_sessions","title":"svm_cifs_signed_sessions","text":"

      Number of signed SMB and SMB2 sessions.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs signed_sessionsUnit: noneType: rawBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver signed_sessionsUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_write_latency","title":"svm_cifs_write_latency","text":"

      Average latency for CIFS write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs average_write_latencyUnit: microsecType: averageBase: total_write_ops conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver cifs_write_latencyUnit: microsecType: averageBase: cifs_write_ops conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_cifs_write_ops","title":"svm_cifs_write_ops","text":"

      Total number of CIFS write operations

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_cifs total_write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/cifs_vserver.yaml ZAPI perf-object-get-instances cifs:vserver cifs_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/cifs_vserver.yaml"},{"location":"ontap-metrics/#svm_nfs_access_avg_latency","title":"svm_nfs_access_avg_latency","text":"

      Average latency of Access procedure requests. The counter keeps track of the average response time of Access requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 access.average_latencyUnit: microsecType: averageBase: access.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 access_avg_latencyUnit: microsecType: average,no-zero-valuesBase: access_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_access_total","title":"svm_nfs_access_total","text":"

      Total number of Access procedure requests. It is the total number of access success and access error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 access.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 access_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_backchannel_ctl_avg_latency","title":"svm_nfs_backchannel_ctl_avg_latency","text":"

      Average latency of BACKCHANNEL_CTL operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 backchannel_ctl.average_latencyUnit: microsecType: averageBase: backchannel_ctl.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 backchannel_ctl.average_latencyUnit: microsecType: averageBase: backchannel_ctl.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 backchannel_ctl_avg_latencyUnit: microsecType: average,no-zero-valuesBase: backchannel_ctl_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 backchannel_ctl_avg_latencyUnit: microsecType: average,no-zero-valuesBase: backchannel_ctl_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_backchannel_ctl_total","title":"svm_nfs_backchannel_ctl_total","text":"

      Total number of BACKCHANNEL_CTL operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 backchannel_ctl.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 backchannel_ctl.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 backchannel_ctl_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 backchannel_ctl_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_bind_conn_to_session_avg_latency","title":"svm_nfs_bind_conn_to_session_avg_latency","text":"

      Average latency of BIND_CONN_TO_SESSION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 bind_connections_to_session.average_latencyUnit: microsecType: averageBase: bind_connections_to_session.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 bind_conn_to_session.average_latencyUnit: microsecType: averageBase: bind_conn_to_session.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 bind_conn_to_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: bind_conn_to_session_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 bind_conn_to_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: bind_conn_to_session_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_bind_conn_to_session_total","title":"svm_nfs_bind_conn_to_session_total","text":"

      Total number of BIND_CONN_TO_SESSION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 bind_connections_to_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 bind_conn_to_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 bind_conn_to_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 bind_conn_to_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_close_avg_latency","title":"svm_nfs_close_avg_latency","text":"

      Average latency of CLOSE procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 close.average_latencyUnit: microsecType: averageBase: close.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 close.average_latencyUnit: microsecType: averageBase: close.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 close.average_latencyUnit: microsecType: averageBase: close.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 close_avg_latencyUnit: microsecType: average,no-zero-valuesBase: close_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 close_avg_latencyUnit: microsecType: average,no-zero-valuesBase: close_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 close_avg_latencyUnit: microsecType: average,no-zero-valuesBase: close_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_close_total","title":"svm_nfs_close_total","text":"

      Total number of CLOSE procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 close.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 close.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 close.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 close_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 close_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 close_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_commit_avg_latency","title":"svm_nfs_commit_avg_latency","text":"

      Average latency of Commit procedure requests. The counter keeps track of the average response time of Commit requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 commit.average_latencyUnit: microsecType: averageBase: commit.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 commit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: commit_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_commit_total","title":"svm_nfs_commit_total","text":"

      Total number of Commit procedure requests. It is the total number of Commit success and Commit error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 commit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 commit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_create_avg_latency","title":"svm_nfs_create_avg_latency","text":"

      Average latency of Create procedure requests. The counter keeps track of the average response time of Create requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 create.average_latencyUnit: microsecType: averageBase: create.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 create_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_create_session_avg_latency","title":"svm_nfs_create_session_avg_latency","text":"

      Average latency of CREATE_SESSION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 create_session.average_latencyUnit: microsecType: averageBase: create_session.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 create_session.average_latencyUnit: microsecType: averageBase: create_session.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 create_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_session_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 create_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: create_session_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_create_session_total","title":"svm_nfs_create_session_total","text":"

      Total number of CREATE_SESSION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 create_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 create_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 create_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 create_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_create_total","title":"svm_nfs_create_total","text":"

      Total number of Create procedure requests. It is the total number of create success and create error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 create.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 create_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_delegpurge_avg_latency","title":"svm_nfs_delegpurge_avg_latency","text":"

      Average latency of DELEGPURGE procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 delegpurge.average_latencyUnit: microsecType: averageBase: delegpurge.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 delegpurge.average_latencyUnit: microsecType: averageBase: delegpurge.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 delegpurge.average_latencyUnit: microsecType: averageBase: delegpurge.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 delegpurge_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegpurge_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 delegpurge_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegpurge_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 delegpurge_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegpurge_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_delegpurge_total","title":"svm_nfs_delegpurge_total","text":"

      Total number of DELEGPURGE procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 delegpurge.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 delegpurge.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 delegpurge.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 delegpurge_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 delegpurge_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 delegpurge_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_delegreturn_avg_latency","title":"svm_nfs_delegreturn_avg_latency","text":"

      Average latency of DELEGRETURN procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 delegreturn.average_latencyUnit: microsecType: averageBase: delegreturn.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 delegreturn.average_latencyUnit: microsecType: averageBase: delegreturn.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 delegreturn.average_latencyUnit: microsecType: averageBase: delegreturn.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 delegreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegreturn_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 delegreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegreturn_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 delegreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: delegreturn_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_delegreturn_total","title":"svm_nfs_delegreturn_total","text":"

      Total number of DELEGRETURN procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 delegreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 delegreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 delegreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 delegreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 delegreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 delegreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_destroy_clientid_avg_latency","title":"svm_nfs_destroy_clientid_avg_latency","text":"

      Average latency of DESTROY_CLIENTID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 destroy_clientid.average_latencyUnit: microsecType: averageBase: destroy_clientid.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 destroy_clientid.average_latencyUnit: microsecType: averageBase: destroy_clientid.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 destroy_clientid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_clientid_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 destroy_clientid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_clientid_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_destroy_clientid_total","title":"svm_nfs_destroy_clientid_total","text":"

      Total number of DESTROY_CLIENTID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 destroy_clientid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 destroy_clientid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 destroy_clientid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 destroy_clientid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_destroy_session_avg_latency","title":"svm_nfs_destroy_session_avg_latency","text":"

      Average latency of DESTROY_SESSION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 destroy_session.average_latencyUnit: microsecType: averageBase: destroy_session.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 destroy_session.average_latencyUnit: microsecType: averageBase: destroy_session.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 destroy_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_session_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 destroy_session_avg_latencyUnit: microsecType: average,no-zero-valuesBase: destroy_session_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_destroy_session_total","title":"svm_nfs_destroy_session_total","text":"

      Total number of DESTROY_SESSION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 destroy_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 destroy_session.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 destroy_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 destroy_session_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_exchange_id_avg_latency","title":"svm_nfs_exchange_id_avg_latency","text":"

      Average latency of EXCHANGE_ID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 exchange_id.average_latencyUnit: microsecType: averageBase: exchange_id.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 exchange_id.average_latencyUnit: microsecType: averageBase: exchange_id.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 exchange_id_avg_latencyUnit: microsecType: average,no-zero-valuesBase: exchange_id_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 exchange_id_avg_latencyUnit: microsecType: average,no-zero-valuesBase: exchange_id_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_exchange_id_total","title":"svm_nfs_exchange_id_total","text":"

      Total number of EXCHANGE_ID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 exchange_id.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 exchange_id.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 exchange_id_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 exchange_id_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_free_stateid_avg_latency","title":"svm_nfs_free_stateid_avg_latency","text":"

      Average latency of FREE_STATEID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 free_stateid.average_latencyUnit: microsecType: averageBase: free_stateid.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 free_stateid.average_latencyUnit: microsecType: averageBase: free_stateid.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 free_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: free_stateid_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 free_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: free_stateid_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_free_stateid_total","title":"svm_nfs_free_stateid_total","text":"

      Total number of FREE_STATEID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 free_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 free_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 free_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 free_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_fsinfo_avg_latency","title":"svm_nfs_fsinfo_avg_latency","text":"

      Average latency of FSInfo procedure requests. The counter keeps track of the average response time of FSInfo requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 fsinfo.average_latencyUnit: microsecType: averageBase: fsinfo.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 fsinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: fsinfo_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_fsinfo_total","title":"svm_nfs_fsinfo_total","text":"

      Total number of FSInfo procedure requests. It is the total number of FSInfo success and FSInfo error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 fsinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 fsinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_fsstat_avg_latency","title":"svm_nfs_fsstat_avg_latency","text":"

      Average latency of FSStat procedure requests. The counter keeps track of the average response time of FSStat requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 fsstat.average_latencyUnit: microsecType: averageBase: fsstat.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 fsstat_avg_latencyUnit: microsecType: average,no-zero-valuesBase: fsstat_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_fsstat_total","title":"svm_nfs_fsstat_total","text":"

      Total number of FSStat procedure requests. It is the total number of FSStat success and FSStat error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 fsstat.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 fsstat_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_get_dir_delegation_avg_latency","title":"svm_nfs_get_dir_delegation_avg_latency","text":"

      Average latency of GET_DIR_DELEGATION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 get_dir_delegation.average_latencyUnit: microsecType: averageBase: get_dir_delegation.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 get_dir_delegation.average_latencyUnit: microsecType: averageBase: get_dir_delegation.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 get_dir_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: get_dir_delegation_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 get_dir_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: get_dir_delegation_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_get_dir_delegation_total","title":"svm_nfs_get_dir_delegation_total","text":"

      Total number of GET_DIR_DELEGATION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 get_dir_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 get_dir_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 get_dir_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 get_dir_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getattr_avg_latency","title":"svm_nfs_getattr_avg_latency","text":"

      Average latency of GetAttr procedure requests. This counter keeps track of the average response time of GetAttr requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getattr.average_latencyUnit: microsecType: averageBase: getattr.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getattr_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getattr_total","title":"svm_nfs_getattr_total","text":"

      Total number of Getattr procedure requests. It is the total number of getattr success and getattr error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getdeviceinfo_avg_latency","title":"svm_nfs_getdeviceinfo_avg_latency","text":"
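
      Counters listed with Type: rate, such as getattr.total, are cumulative totals in the underlying counter tables; the collector turns them into per-second values by differencing two polls. A minimal sketch of that calculation, with made-up sample numbers that are not taken from this document:

          # Hedged sketch: convert a cumulative "Type: rate" counter into ops/sec.
          def per_second_rate(prev_total: int, curr_total: int, elapsed_seconds: float) -> float:
              """Per-second rate from a cumulative counter sampled twice."""
              return (curr_total - prev_total) / elapsed_seconds

          # e.g. 1,500 additional getattr requests observed over a 60-second poll interval
          print(per_second_rate(10_000, 11_500, 60.0))  # 25.0 ops/sec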

      Average latency of GETDEVICEINFO operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 getdeviceinfo.average_latencyUnit: microsecType: averageBase: getdeviceinfo.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getdeviceinfo.average_latencyUnit: microsecType: averageBase: getdeviceinfo.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 getdeviceinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdeviceinfo_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getdeviceinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdeviceinfo_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getdeviceinfo_total","title":"svm_nfs_getdeviceinfo_total","text":"

      Total number of GETDEVICEINFO operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 getdeviceinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getdeviceinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 getdeviceinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getdeviceinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getdevicelist_avg_latency","title":"svm_nfs_getdevicelist_avg_latency","text":"

      Average latency of GETDEVICELIST operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 getdevicelist.average_latencyUnit: microsecType: averageBase: getdevicelist.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getdevicelist.average_latencyUnit: microsecType: averageBase: getdevicelist.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 getdevicelist_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdevicelist_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getdevicelist_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getdevicelist_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getdevicelist_total","title":"svm_nfs_getdevicelist_total","text":"

      Total number of GETDEVICELIST operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 getdevicelist.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getdevicelist.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 getdevicelist_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getdevicelist_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getfh_avg_latency","title":"svm_nfs_getfh_avg_latency","text":"

      Average latency of GETFH procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 getfh.average_latencyUnit: microsecType: averageBase: getfh.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 getfh.average_latencyUnit: microsecType: averageBase: getfh.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getfh.average_latencyUnit: microsecType: averageBase: getfh.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 getfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getfh_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 getfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: getfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_getfh_total","title":"svm_nfs_getfh_total","text":"

      Total number of GETFH procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 getfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 getfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 getfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 getfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 getfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 getfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_latency","title":"svm_nfs_latency","text":"

      Average latency of NFSv3 requests. This counter keeps track of the average response time of NFSv3 requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 latencyUnit: microsecType: average,no-zero-valuesBase: total_ops conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_layoutcommit_avg_latency","title":"svm_nfs_layoutcommit_avg_latency","text":"
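
      Counters listed with Type: average and a Base counter (here total_ops) are cumulative microsecond sums; the average latency over an interval is the change in the latency counter divided by the change in its base counter. A minimal sketch with illustrative numbers only:

          # Hedged sketch: derive average latency from a "Type: average" counter and its base.
          def average_latency_us(prev_lat: int, curr_lat: int, prev_ops: int, curr_ops: int) -> float:
              """Average per-operation latency in microseconds between two samples."""
              ops = curr_ops - prev_ops
              return (curr_lat - prev_lat) / ops if ops else 0.0

          # 600,000 extra microseconds spread over 1,000 extra ops -> 600 us per op
          print(average_latency_us(4_000_000, 4_600_000, 100_000, 101_000))  # 600.0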

      Average latency of LAYOUTCOMMIT operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 layoutcommit.average_latencyUnit: microsecType: averageBase: layoutcommit.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 layoutcommit.average_latencyUnit: microsecType: averageBase: layoutcommit.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 layoutcommit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutcommit_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 layoutcommit_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutcommit_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_layoutcommit_total","title":"svm_nfs_layoutcommit_total","text":"

      Total number of LAYOUTCOMMIT operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 layoutcommit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 layoutcommit.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 layoutcommit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 layoutcommit_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_layoutget_avg_latency","title":"svm_nfs_layoutget_avg_latency","text":"

      Average latency of LAYOUTGET operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 layoutget.average_latencyUnit: microsecType: averageBase: layoutget.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 layoutget.average_latencyUnit: microsecType: averageBase: layoutget.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 layoutget_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutget_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 layoutget_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutget_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_layoutget_total","title":"svm_nfs_layoutget_total","text":"

      Total number of LAYOUTGET operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 layoutget.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 layoutget.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 layoutget_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 layoutget_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_layoutreturn_avg_latency","title":"svm_nfs_layoutreturn_avg_latency","text":"

      Average latency of LAYOUTRETURN operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 layoutreturn.average_latencyUnit: microsecType: averageBase: layoutreturn.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 layoutreturn.average_latencyUnit: microsecType: averageBase: layoutreturn.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 layoutreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutreturn_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 layoutreturn_avg_latencyUnit: microsecType: average,no-zero-valuesBase: layoutreturn_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_layoutreturn_total","title":"svm_nfs_layoutreturn_total","text":"

      Total number of LAYOUTRETURN operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 layoutreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 layoutreturn.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 layoutreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 layoutreturn_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_link_avg_latency","title":"svm_nfs_link_avg_latency","text":"

      Average latency of Link procedure requests. The counter keeps track of the average response time of Link requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 link.average_latencyUnit: microsecType: averageBase: link.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 link_avg_latencyUnit: microsecType: average,no-zero-valuesBase: link_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_link_total","title":"svm_nfs_link_total","text":"

      Total number of Link procedure requests. It is the total number of Link success and Link error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 link.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 link_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lock_avg_latency","title":"svm_nfs_lock_avg_latency","text":"

      Average latency of LOCK procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 lock.average_latencyUnit: microsecType: averageBase: lock.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lock.average_latencyUnit: microsecType: averageBase: lock.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lock.average_latencyUnit: microsecType: averageBase: lock.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 lock_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lock_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lock_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lock_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lock_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lock_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lock_total","title":"svm_nfs_lock_total","text":"

      Total number of LOCK procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 lock.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lock.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lock.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 lock_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lock_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lock_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lockt_avg_latency","title":"svm_nfs_lockt_avg_latency","text":"

      Average latency of LOCKT procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 lockt.average_latencyUnit: microsecType: averageBase: lockt.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lockt.average_latencyUnit: microsecType: averageBase: lockt.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lockt.average_latencyUnit: microsecType: averageBase: lockt.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 lockt_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lockt_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lockt_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lockt_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lockt_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lockt_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lockt_total","title":"svm_nfs_lockt_total","text":"

      Total number of LOCKT procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 lockt.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lockt.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lockt.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 lockt_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lockt_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lockt_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_locku_avg_latency","title":"svm_nfs_locku_avg_latency","text":"

      Average latency of LOCKU procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 locku.average_latencyUnit: microsecType: averageBase: locku.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 locku.average_latencyUnit: microsecType: averageBase: locku.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 locku.average_latencyUnit: microsecType: averageBase: locku.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 locku_avg_latencyUnit: microsecType: average,no-zero-valuesBase: locku_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 locku_avg_latencyUnit: microsecType: average,no-zero-valuesBase: locku_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 locku_avg_latencyUnit: microsecType: average,no-zero-valuesBase: locku_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_locku_total","title":"svm_nfs_locku_total","text":"

      Total number of LOCKU procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 locku.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 locku.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 locku.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 locku_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 locku_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 locku_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lookup_avg_latency","title":"svm_nfs_lookup_avg_latency","text":"

      Average latency of LookUp procedure requests. This shows the average time it takes for the LookUp operation to reply to the request.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lookup.average_latencyUnit: microsecType: averageBase: lookup.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lookup_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookup_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lookup_total","title":"svm_nfs_lookup_total","text":"

      Total number of Lookup procedure requests. It is the total number of lookup success and lookup error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lookup.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lookup_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lookupp_avg_latency","title":"svm_nfs_lookupp_avg_latency","text":"

      Average latency of LOOKUPP procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 lookupp.average_latencyUnit: microsecType: averageBase: lookupp.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lookupp.average_latencyUnit: microsecType: averageBase: lookupp.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lookupp.average_latencyUnit: microsecType: averageBase: lookupp.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 lookupp_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookupp_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lookupp_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookupp_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lookupp_avg_latencyUnit: microsecType: average,no-zero-valuesBase: lookupp_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_lookupp_total","title":"svm_nfs_lookupp_total","text":"

      Total number of LOOKUPP procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 lookupp.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 lookupp.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 lookupp.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 lookupp_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 lookupp_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 lookupp_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_mkdir_avg_latency","title":"svm_nfs_mkdir_avg_latency","text":"

      Average latency of MkDir procedure requests. The counter keeps track of the average response time of MkDir requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 mkdir.average_latencyUnit: microsecType: averageBase: mkdir.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 mkdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: mkdir_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_mkdir_total","title":"svm_nfs_mkdir_total","text":"

      Total number of MkDir procedure requests. It is the total number of MkDir success and MkDir error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 mkdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 mkdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_mknod_avg_latency","title":"svm_nfs_mknod_avg_latency","text":"

      Average latency of MkNod procedure requests. The counter keeps track of the average response time of MkNod requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 mknod.average_latencyUnit: microsecType: averageBase: mknod.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 mknod_avg_latencyUnit: microsecType: average,no-zero-valuesBase: mknod_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_mknod_total","title":"svm_nfs_mknod_total","text":"

      Total number of MkNod procedure requests. It is the total number of MkNod success and MkNod error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 mknod.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 mknod_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_null_avg_latency","title":"svm_nfs_null_avg_latency","text":"

      Average latency of Null procedure requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 null.average_latencyUnit: microsecType: averageBase: null.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 null_avg_latencyUnit: microsecType: average,no-zero-valuesBase: null_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_null_total","title":"svm_nfs_null_total","text":"

      Total number of Null procedure requests. It is the total of null success and null error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 null.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 null_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_nverify_avg_latency","title":"svm_nfs_nverify_avg_latency","text":"

      Average latency of NVERIFY procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 nverify.average_latencyUnit: microsecType: averageBase: nverify.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 nverify.average_latencyUnit: microsecType: averageBase: nverify.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 nverify.average_latencyUnit: microsecType: averageBase: nverify.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 nverify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: nverify_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 nverify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: nverify_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 nverify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: nverify_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_nverify_total","title":"svm_nfs_nverify_total","text":"

      Total number of NVERIFY procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 nverify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 nverify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 nverify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 nverify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 nverify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 nverify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_open_avg_latency","title":"svm_nfs_open_avg_latency","text":"

      Average latency of OPEN procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 open.average_latencyUnit: microsecType: averageBase: open.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 open.average_latencyUnit: microsecType: averageBase: open.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 open.average_latencyUnit: microsecType: averageBase: open.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 open_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 open_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 open_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_open_confirm_avg_latency","title":"svm_nfs_open_confirm_avg_latency","text":"

      Average latency of OPEN_CONFIRM procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 open_confirm.average_latencyUnit: microsecType: averageBase: open_confirm.total conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 open_confirm_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_confirm_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_open_confirm_total","title":"svm_nfs_open_confirm_total","text":"

      Total number of OPEN_CONFIRM procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 open_confirm.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 open_confirm_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_open_downgrade_avg_latency","title":"svm_nfs_open_downgrade_avg_latency","text":"

      Average latency of OPEN_DOWNGRADE procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 open_downgrade.average_latencyUnit: microsecType: averageBase: open_downgrade.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 open_downgrade.average_latencyUnit: microsecType: averageBase: open_downgrade.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 open_downgrade.average_latencyUnit: microsecType: averageBase: open_downgrade.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 open_downgrade_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_downgrade_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 open_downgrade_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_downgrade_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 open_downgrade_avg_latencyUnit: microsecType: average,no-zero-valuesBase: open_downgrade_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_open_downgrade_total","title":"svm_nfs_open_downgrade_total","text":"

      Total number of OPEN_DOWNGRADE procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 open_downgrade.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 open_downgrade.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 open_downgrade.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 open_downgrade_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 open_downgrade_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 open_downgrade_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_open_total","title":"svm_nfs_open_total","text":"

      Total number of OPEN procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 open.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 open.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 open.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 open_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 open_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 open_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_openattr_avg_latency","title":"svm_nfs_openattr_avg_latency","text":"

      Average latency of OPENATTR procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 openattr.average_latencyUnit: microsecType: averageBase: openattr.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 openattr.average_latencyUnit: microsecType: averageBase: openattr.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 openattr.average_latencyUnit: microsecType: averageBase: openattr.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 openattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: openattr_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 openattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: openattr_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 openattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: openattr_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_openattr_total","title":"svm_nfs_openattr_total","text":"

      Total number of OPENATTR procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 openattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 openattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 openattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 openattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 openattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 openattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_ops","title":"svm_nfs_ops","text":"

      Total number of NFSv3 procedure requests per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 nfsv3_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 total_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 total_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 total_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_pathconf_avg_latency","title":"svm_nfs_pathconf_avg_latency","text":"
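
      Once collected, these counters surface as time series such as svm_nfs_ops that can be read back from whatever backend scrapes the exporter. A hedged sketch against a Prometheus server; the server address and the svm label name are assumptions for illustration, not taken from this document:

          # Hedged sketch: list the five busiest SVMs by NFS ops/sec from a Prometheus
          # server that scrapes the exporter. URL and label names are hypothetical.
          import json
          import urllib.parse
          import urllib.request

          PROM_URL = "http://prometheus.example.com:9090"   # hypothetical address
          query = "topk(5, svm_nfs_ops)"                    # five busiest SVMs

          url = f"{PROM_URL}/api/v1/query?" + urllib.parse.urlencode({"query": query})
          with urllib.request.urlopen(url) as resp:
              result = json.load(resp)

          for series in result["data"]["result"]:
              _, value = series["value"]
              print(series["metric"].get("svm", "?"), value)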

      Average latency of PathConf procedure requests. The counter keeps track of the average response time of PathConf requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 pathconf.average_latencyUnit: microsecType: averageBase: pathconf.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 pathconf_avg_latencyUnit: microsecType: average,no-zero-valuesBase: pathconf_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_pathconf_total","title":"svm_nfs_pathconf_total","text":"

      Total number of PathConf procedure requests. It is the total number of PathConf success and PathConf error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 pathconf.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 pathconf_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_putfh_avg_latency","title":"svm_nfs_putfh_avg_latency","text":"

      Average latency of PUTFH procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 putfh.average_latencyUnit: microsecType: averageBase: putfh.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 putfh.average_latencyUnit: noneType: deltaBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 putfh.average_latencyUnit: microsecType: averageBase: putfh.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 putfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putfh_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 putfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 putfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_putfh_total","title":"svm_nfs_putfh_total","text":"

      Total number of PUTFH procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 putfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 putfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 putfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 putfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 putfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 putfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_putpubfh_avg_latency","title":"svm_nfs_putpubfh_avg_latency","text":"

      Average latency of PUTPUBFH procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 putpubfh.average_latencyUnit: microsecType: averageBase: putpubfh.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 putpubfh.average_latencyUnit: microsecType: averageBase: putpubfh.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 putpubfh.average_latencyUnit: microsecType: averageBase: putpubfh.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 putpubfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putpubfh_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 putpubfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putpubfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 putpubfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putpubfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_putpubfh_total","title":"svm_nfs_putpubfh_total","text":"

      Total number of PUTPUBFH procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 putpubfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 putpubfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 putpubfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 putpubfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 putpubfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 putpubfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_putrootfh_avg_latency","title":"svm_nfs_putrootfh_avg_latency","text":"

      Average latency of PUTROOTFH procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 putrootfh.average_latencyUnit: microsecType: averageBase: putrootfh.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 putrootfh.average_latencyUnit: microsecType: averageBase: putrootfh.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 putrootfh.average_latencyUnit: microsecType: averageBase: putrootfh.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 putrootfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putrootfh_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 putrootfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putrootfh_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 putrootfh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: putrootfh_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_putrootfh_total","title":"svm_nfs_putrootfh_total","text":"

      Total number of PUTROOTFH procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 putrootfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 putrootfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 putrootfh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 putrootfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 putrootfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 putrootfh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_read_avg_latency","title":"svm_nfs_read_avg_latency","text":"

      Average latency of Read procedure requests. The counter keeps track of the average response time of Read requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 read.average_latencyUnit: microsecType: averageBase: read.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 read_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_read_ops","title":"svm_nfs_read_ops","text":"

      Total observed NFSv3 read operations per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 nfsv3_read_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_read_symlink_avg_latency","title":"svm_nfs_read_symlink_avg_latency","text":"

      Average latency of ReadSymLink procedure requests. The counter keeps track of the average response time of ReadSymLink requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 read_symlink.average_latencyUnit: microsecType: averageBase: read_symlink.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 read_symlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: read_symlink_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_read_symlink_total","title":"svm_nfs_read_symlink_total","text":"

      Total number of ReadSymLink procedure requests. It is the total number of read symlink success and read symlink error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 read_symlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 read_symlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_read_throughput","title":"svm_nfs_read_throughput","text":"

      Rate of NFSv3 read data transfers per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 total.read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 total.read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 total.read_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 nfsv3_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 nfs4_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 nfs41_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 nfs42_read_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_read_total","title":"svm_nfs_read_total","text":"

      Total number of Read procedure requests. It is the total number of read success and read error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 read.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 read_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_readdir_avg_latency","title":"svm_nfs_readdir_avg_latency","text":"

      Average latency of ReadDir procedure requests. The counter keeps track of the average response time of ReadDir requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 readdir.average_latencyUnit: microsecType: averageBase: readdir.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 readdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdir_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_readdir_total","title":"svm_nfs_readdir_total","text":"

      Total number of ReadDir procedure requests. It is the total number of ReadDir success and ReadDir error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 readdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 readdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_readdirplus_avg_latency","title":"svm_nfs_readdirplus_avg_latency","text":"

      Average latency of ReadDirPlus procedure requests. The counter keeps track of the average response time of ReadDirPlus requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 readdirplus.average_latencyUnit: microsecType: averageBase: readdirplus.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 readdirplus_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readdirplus_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_readdirplus_total","title":"svm_nfs_readdirplus_total","text":"

      Total number of ReadDirPlus procedure requests. It is the total number of ReadDirPlus success and ReadDirPlus error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 readdirplus.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 readdirplus_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_readlink_avg_latency","title":"svm_nfs_readlink_avg_latency","text":"

      Average latency of READLINK procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 readlink.average_latencyUnit: microsecType: averageBase: readlink.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 readlink.average_latencyUnit: microsecType: averageBase: readlink.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 readlink.average_latencyUnit: microsecType: averageBase: readlink.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 readlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readlink_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 readlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readlink_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 readlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: readlink_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_readlink_total","title":"svm_nfs_readlink_total","text":"

      Total number of READLINK procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 readlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 readlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 readlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 readlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 readlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 readlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_reclaim_complete_avg_latency","title":"svm_nfs_reclaim_complete_avg_latency","text":"

      Average latency of RECLAIM_COMPLETE operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 reclaim_complete.average_latencyUnit: microsecType: averageBase: reclaim_complete.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 reclaim_complete.average_latencyUnit: microsecType: averageBase: reclaim_complete.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 reclaim_complete_avg_latencyUnit: microsecType: average,no-zero-valuesBase: reclaim_complete_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 reclaim_complete_avg_latencyUnit: microsecType: average,no-zero-valuesBase: reclaim_complete_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_reclaim_complete_total","title":"svm_nfs_reclaim_complete_total","text":"

      Total number of RECLAIM_COMPLETE operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 reclaim_complete.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 reclaim_complete.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 reclaim_complete_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 reclaim_complete_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_release_lock_owner_avg_latency","title":"svm_nfs_release_lock_owner_avg_latency","text":"

Average latency of RELEASE_LOCKOWNER procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 release_lock_owner.average_latencyUnit: microsecType: averageBase: release_lock_owner.total conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 release_lock_owner_avg_latencyUnit: microsecType: average,no-zero-valuesBase: release_lock_owner_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_release_lock_owner_total","title":"svm_nfs_release_lock_owner_total","text":"

      Total number of RELEASE_LOCKOWNER procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 release_lock_owner.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 release_lock_owner_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_remove_avg_latency","title":"svm_nfs_remove_avg_latency","text":"

      Average latency of Remove procedure requests. The counter keeps track of the average response time of Remove requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 remove.average_latencyUnit: microsecType: averageBase: remove.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 remove_avg_latencyUnit: microsecType: average,no-zero-valuesBase: remove_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_remove_total","title":"svm_nfs_remove_total","text":"

Total number of Remove procedure requests. It is the total number of Remove success and Remove error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 remove.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 remove_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_rename_avg_latency","title":"svm_nfs_rename_avg_latency","text":"

      Average latency of Rename procedure requests. The counter keeps track of the average response time of Rename requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 rename.average_latencyUnit: microsecType: averageBase: rename.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 rename_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rename_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_rename_total","title":"svm_nfs_rename_total","text":"

Total number of Rename procedure requests. It is the total number of Rename success and Rename error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 rename.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 rename_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_renew_avg_latency","title":"svm_nfs_renew_avg_latency","text":"

      Average latency of RENEW procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 renew.average_latencyUnit: microsecType: averageBase: renew.total conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 renew_avg_latencyUnit: microsecType: average,no-zero-valuesBase: renew_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_renew_total","title":"svm_nfs_renew_total","text":"

      Total number of RENEW procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 renew.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 renew_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_restorefh_avg_latency","title":"svm_nfs_restorefh_avg_latency","text":"

      Average latency of RESTOREFH procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 restorefh.average_latencyUnit: microsecType: averageBase: restorefh.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 restorefh.average_latencyUnit: microsecType: averageBase: restorefh.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 restorefh.average_latencyUnit: microsecType: averageBase: restorefh.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 restorefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: restorefh_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 restorefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: restorefh_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 restorefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: restorefh_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_restorefh_total","title":"svm_nfs_restorefh_total","text":"

      Total number of RESTOREFH procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 restorefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 restorefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 restorefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 restorefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 restorefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 restorefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_rmdir_avg_latency","title":"svm_nfs_rmdir_avg_latency","text":"

      Average latency of RmDir procedure requests. The counter keeps track of the average response time of RmDir requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 rmdir.average_latencyUnit: microsecType: averageBase: rmdir.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 rmdir_avg_latencyUnit: microsecType: average,no-zero-valuesBase: rmdir_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_rmdir_total","title":"svm_nfs_rmdir_total","text":"

Total number of RmDir procedure requests. It is the total number of RmDir success and RmDir error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 rmdir.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 rmdir_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_savefh_avg_latency","title":"svm_nfs_savefh_avg_latency","text":"

      Average latency of SAVEFH procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 savefh.average_latencyUnit: microsecType: averageBase: savefh.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 savefh.average_latencyUnit: microsecType: averageBase: savefh.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 savefh.average_latencyUnit: microsecType: averageBase: savefh.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 savefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: savefh_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 savefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: savefh_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 savefh_avg_latencyUnit: microsecType: average,no-zero-valuesBase: savefh_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_savefh_total","title":"svm_nfs_savefh_total","text":"

      Total number of SAVEFH procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 savefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 savefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 savefh.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 savefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 savefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 savefh_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_secinfo_avg_latency","title":"svm_nfs_secinfo_avg_latency","text":"

      Average latency of SECINFO procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 secinfo.average_latencyUnit: microsecType: averageBase: secinfo.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 secinfo.average_latencyUnit: microsecType: averageBase: secinfo.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 secinfo.average_latencyUnit: microsecType: averageBase: secinfo.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 secinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 secinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 secinfo_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_secinfo_no_name_avg_latency","title":"svm_nfs_secinfo_no_name_avg_latency","text":"

      Average latency of SECINFO_NO_NAME operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 secinfo_no_name.average_latencyUnit: microsecType: averageBase: secinfo_no_name.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 secinfo_no_name.average_latencyUnit: microsecType: averageBase: secinfo_no_name.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 secinfo_no_name_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_no_name_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 secinfo_no_name_avg_latencyUnit: microsecType: average,no-zero-valuesBase: secinfo_no_name_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_secinfo_no_name_total","title":"svm_nfs_secinfo_no_name_total","text":"

      Total number of SECINFO_NO_NAME operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 secinfo_no_name.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 secinfo_no_name.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 secinfo_no_name_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 secinfo_no_name_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_secinfo_total","title":"svm_nfs_secinfo_total","text":"

      Total number of SECINFO procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 secinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 secinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 secinfo.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 secinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 secinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 secinfo_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_sequence_avg_latency","title":"svm_nfs_sequence_avg_latency","text":"

      Average latency of SEQUENCE operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 sequence.average_latencyUnit: microsecType: averageBase: sequence.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 sequence.average_latencyUnit: microsecType: averageBase: sequence.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 sequence_avg_latencyUnit: microsecType: average,no-zero-valuesBase: sequence_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 sequence_avg_latencyUnit: microsecType: average,no-zero-valuesBase: sequence_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_sequence_total","title":"svm_nfs_sequence_total","text":"

      Total number of SEQUENCE operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 sequence.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 sequence.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 sequence_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 sequence_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_set_ssv_avg_latency","title":"svm_nfs_set_ssv_avg_latency","text":"

      Average latency of SET_SSV operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 set_ssv.average_latencyUnit: microsecType: averageBase: set_ssv.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 set_ssv.average_latencyUnit: microsecType: averageBase: set_ssv.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 set_ssv_avg_latencyUnit: microsecType: average,no-zero-valuesBase: set_ssv_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 set_ssv_avg_latencyUnit: microsecType: average,no-zero-valuesBase: set_ssv_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_set_ssv_total","title":"svm_nfs_set_ssv_total","text":"

      Total number of SET_SSV operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 set_ssv.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 set_ssv.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 set_ssv_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 set_ssv_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_setattr_avg_latency","title":"svm_nfs_setattr_avg_latency","text":"

      Average latency of SetAttr procedure requests. The counter keeps track of the average response time of SetAttr requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 setattr.average_latencyUnit: microsecType: averageBase: setattr.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 setattr_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setattr_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_setattr_total","title":"svm_nfs_setattr_total","text":"

Total number of SetAttr procedure requests. It is the total number of SetAttr success and SetAttr error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 setattr.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 setattr_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_setclientid_avg_latency","title":"svm_nfs_setclientid_avg_latency","text":"

      Average latency of SETCLIENTID procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 setclientid.average_latencyUnit: microsecType: averageBase: setclientid.total conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 setclientid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setclientid_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_setclientid_confirm_avg_latency","title":"svm_nfs_setclientid_confirm_avg_latency","text":"

      Average latency of SETCLIENTID_CONFIRM procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 setclientid_confirm.average_latencyUnit: microsecType: averageBase: setclientid_confirm.total conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 setclientid_confirm_avg_latencyUnit: microsecType: average,no-zero-valuesBase: setclientid_confirm_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_setclientid_confirm_total","title":"svm_nfs_setclientid_confirm_total","text":"

      Total number of SETCLIENTID_CONFIRM procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 setclientid_confirm.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 setclientid_confirm_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_setclientid_total","title":"svm_nfs_setclientid_total","text":"

      Total number of SETCLIENTID procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 setclientid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4 setclientid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml"},{"location":"ontap-metrics/#svm_nfs_symlink_avg_latency","title":"svm_nfs_symlink_avg_latency","text":"

      Average latency of SymLink procedure requests. The counter keeps track of the average response time of SymLink requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 symlink.average_latencyUnit: microsecType: averageBase: symlink.total conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 symlink_avg_latencyUnit: microsecType: average,no-zero-valuesBase: symlink_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_symlink_total","title":"svm_nfs_symlink_total","text":"

Total number of SymLink procedure requests. It is the total number of SymLink success and SymLink error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 symlink.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 symlink_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_test_stateid_avg_latency","title":"svm_nfs_test_stateid_avg_latency","text":"

      Average latency of TEST_STATEID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 test_stateid.average_latencyUnit: microsecType: averageBase: test_stateid.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 test_stateid.average_latencyUnit: microsecType: averageBase: test_stateid.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 test_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: test_stateid_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 test_stateid_avg_latencyUnit: microsecType: average,no-zero-valuesBase: test_stateid_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_test_stateid_total","title":"svm_nfs_test_stateid_total","text":"

      Total number of TEST_STATEID operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 test_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 test_stateid.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 test_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 test_stateid_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_throughput","title":"svm_nfs_throughput","text":"

      Rate of NFSv3 data transfers per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 total.throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 total.write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 total.write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 nfsv3_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 nfs4_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 nfs41_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 nfs42_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_verify_avg_latency","title":"svm_nfs_verify_avg_latency","text":"
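Once exported, `svm_nfs_throughput` can be read back from Prometheus with the standard HTTP query API. The snippet below is a hedged example: the Prometheus address and the `svm` label value are placeholders, and the label set assumed here reflects a typical Harvest Prometheus exporter setup rather than anything stated in this reference.

```python
# Query the exported svm_nfs_throughput series via the Prometheus HTTP API.
# "http://prometheus:9090" and the svm label value are placeholders.
import requests

resp = requests.get(
    "http://prometheus:9090/api/v1/query",
    params={"query": 'svm_nfs_throughput{svm="svm1"}'},
    timeout=10,
)
resp.raise_for_status()
for series in resp.json()["data"]["result"]:
    labels = series["metric"]
    _ts, value = series["value"]
    print(labels.get("svm"), f"{float(value):.0f} B/s")
```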

      Average latency of VERIFY procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 verify.average_latencyUnit: microsecType: averageBase: verify.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 verify.average_latencyUnit: microsecType: averageBase: verify.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 verify.average_latencyUnit: microsecType: averageBase: verify.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 verify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: verify_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 verify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: verify_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 verify_avg_latencyUnit: microsecType: average,no-zero-valuesBase: verify_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_verify_total","title":"svm_nfs_verify_total","text":"

      Total number of VERIFY procedures

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v4 verify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 verify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 verify.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4 verify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 verify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 verify_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_want_delegation_avg_latency","title":"svm_nfs_want_delegation_avg_latency","text":"

      Average latency of WANT_DELEGATION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 want_delegation.average_latencyUnit: microsecType: averageBase: want_delegation.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 want_delegation.average_latencyUnit: microsecType: averageBase: want_delegation.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 want_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: want_delegation_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 want_delegation_avg_latencyUnit: microsecType: average,no-zero-valuesBase: want_delegation_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_want_delegation_total","title":"svm_nfs_want_delegation_total","text":"

      Total number of WANT_DELEGATION operations.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v41 want_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 want_delegation.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv4_1 want_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 want_delegation_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_write_avg_latency","title":"svm_nfs_write_avg_latency","text":"

      Average latency of Write procedure requests. The counter keeps track of the average response time of Write requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 write.average_latencyUnit: microsecType: averageBase: write.total conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 write_avg_latencyUnit: microsecType: average,no-zero-valuesBase: write_total conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_write_ops","title":"svm_nfs_write_ops","text":"

      Total observed NFSv3 write operations per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv3 nfsv3_write_opsUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml"},{"location":"ontap-metrics/#svm_nfs_write_throughput","title":"svm_nfs_write_throughput","text":"

      Rate of NFSv3 write data transfers per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 total.write_throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 total.throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 total.throughputUnit: b_per_secType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 nfsv3_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 nfs4_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 nfs41_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 nfs42_write_throughputUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_nfs_write_total","title":"svm_nfs_write_total","text":"

Total number of Write procedure requests. It is the total number of Write success and Write error requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_nfs_v3 write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv3.yaml REST api/cluster/counter/tables/svm_nfs_v4 write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4.yaml REST api/cluster/counter/tables/svm_nfs_v41 write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_1.yaml REST api/cluster/counter/tables/svm_nfs_v42 write.totalUnit: noneType: rateBase: conf/restperf/9.12.0/nfsv4_2.yaml ZAPI perf-object-get-instances nfsv3 write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv3.yaml ZAPI perf-object-get-instances nfsv4 write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4.yaml ZAPI perf-object-get-instances nfsv4_1 write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/nfsv4_1.yaml ZAPI perf-object-get-instances nfsv4_2 write_totalUnit: noneType: rateBase: conf/zapiperf/cdot/9.11.0/nfsv4_2.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_abort_multipart_upload_failed","title":"svm_ontaps3_svm_abort_multipart_upload_failed","text":"

      Number of failed Abort Multipart Upload operations. svm_ontaps3_svm_abort_multipart_upload_failed is ontaps3_svm_abort_multipart_upload_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_abort_multipart_upload_failed_client_close","title":"svm_ontaps3_svm_abort_multipart_upload_failed_client_close","text":"
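Note the type change here: unlike the `rate` NFS counters above, most of these object-store counters are `delta`, meaning the exported value is the change since the previous poll rather than a per-second rate. A small sketch of the difference, with hypothetical sample values:

```python
# Delta vs. rate, sketched with hypothetical poll values taken 60 s apart.
prev_failed, curr_failed, interval_s = 120, 150, 60.0

delta = curr_failed - prev_failed  # what a "delta" counter reports: 30 failures
rate = delta / interval_s          # what a "rate" counter would report: 0.5/s
print(delta, rate)
```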

Number of times an Abort Multipart Upload operation failed because the client terminated the connection while the operation was still pending on the server. svm_ontaps3_svm_abort_multipart_upload_failed_client_close is ontaps3_svm_abort_multipart_upload_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_abort_multipart_upload_latency","title":"svm_ontaps3_svm_abort_multipart_upload_latency","text":"

      Average latency for Abort Multipart Upload operations. svm_ontaps3_svm_abort_multipart_upload_latency is ontaps3_svm_abort_multipart_upload_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_latencyUnit: microsecType: averageBase: abort_multipart_upload_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_latencyUnit: microsecType: average,no-zero-valuesBase: abort_multipart_upload_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_abort_multipart_upload_rate","title":"svm_ontaps3_svm_abort_multipart_upload_rate","text":"

      Number of Abort Multipart Upload operations per second. svm_ontaps3_svm_abort_multipart_upload_rate is ontaps3_svm_abort_multipart_upload_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_abort_multipart_upload_total","title":"svm_ontaps3_svm_abort_multipart_upload_total","text":"

      Number of Abort Multipart Upload operations. svm_ontaps3_svm_abort_multipart_upload_total is ontaps3_svm_abort_multipart_upload_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server abort_multipart_upload_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server abort_multipart_upload_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_allow_access","title":"svm_ontaps3_svm_allow_access","text":"
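Every `svm_ontaps3_svm_*` metric in this section is the per-SVM roll-up of the corresponding `ontaps3_svm_*` series. A minimal sketch of that aggregation, assuming each per-instance sample carries an `svm` label (the sample data and label names below are illustrative):

```python
# Sketch: sum per-instance object-store samples into one value per SVM.
# Sample data and label names are illustrative only.
from collections import defaultdict

samples = [
    {"svm": "svm1", "instance": "ontaps3_a", "value": 12},
    {"svm": "svm1", "instance": "ontaps3_b", "value": 8},
    {"svm": "svm2", "instance": "ontaps3_c", "value": 5},
]

per_svm = defaultdict(int)
for s in samples:
    per_svm[s["svm"]] += s["value"]

print(dict(per_svm))  # {'svm1': 20, 'svm2': 5}
```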

      Number of times access was allowed. svm_ontaps3_svm_allow_access is ontaps3_svm_allow_access aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server allow_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server allow_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_anonymous_access","title":"svm_ontaps3_svm_anonymous_access","text":"

      Number of times anonymous access was allowed. svm_ontaps3_svm_anonymous_access is ontaps3_svm_anonymous_access aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server anonymous_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server anonymous_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_anonymous_deny_access","title":"svm_ontaps3_svm_anonymous_deny_access","text":"

      Number of times anonymous access was denied. svm_ontaps3_svm_anonymous_deny_access is ontaps3_svm_anonymous_deny_access aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server anonymous_deny_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server anonymous_deny_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_authentication_failures","title":"svm_ontaps3_svm_authentication_failures","text":"

      Number of authentication failures. svm_ontaps3_svm_authentication_failures is ontaps3_svm_authentication_failures aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server authentication_failuresUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server authentication_failuresUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_chunked_upload_reqs","title":"svm_ontaps3_svm_chunked_upload_reqs","text":"

      Total number of object store server chunked object upload requests. svm_ontaps3_svm_chunked_upload_reqs is ontaps3_svm_chunked_upload_reqs aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server chunked_upload_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server chunked_upload_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_complete_multipart_upload_failed","title":"svm_ontaps3_svm_complete_multipart_upload_failed","text":"

      Number of failed Complete Multipart Upload operations. svm_ontaps3_svm_complete_multipart_upload_failed is ontaps3_svm_complete_multipart_upload_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_complete_multipart_upload_failed_client_close","title":"svm_ontaps3_svm_complete_multipart_upload_failed_client_close","text":"

Number of times a Complete Multipart Upload operation failed because the client terminated the connection while the operation was still pending on the server. svm_ontaps3_svm_complete_multipart_upload_failed_client_close is ontaps3_svm_complete_multipart_upload_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_complete_multipart_upload_latency","title":"svm_ontaps3_svm_complete_multipart_upload_latency","text":"

      Average latency for Complete Multipart Upload operations. svm_ontaps3_svm_complete_multipart_upload_latency is ontaps3_svm_complete_multipart_upload_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_latencyUnit: microsecType: averageBase: complete_multipart_upload_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_latencyUnit: microsecType: average,no-zero-valuesBase: complete_multipart_upload_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_complete_multipart_upload_rate","title":"svm_ontaps3_svm_complete_multipart_upload_rate","text":"

      Number of Complete Multipart Upload operations per second. svm_ontaps3_svm_complete_multipart_upload_rate is ontaps3_svm_complete_multipart_upload_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_complete_multipart_upload_total","title":"svm_ontaps3_svm_complete_multipart_upload_total","text":"

      Number of Complete Multipart Upload operations. svm_ontaps3_svm_complete_multipart_upload_total is ontaps3_svm_complete_multipart_upload_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server complete_multipart_upload_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server complete_multipart_upload_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_connected_connections","title":"svm_ontaps3_svm_connected_connections","text":"

      Number of object store server connections currently established. svm_ontaps3_svm_connected_connections is ontaps3_svm_connected_connections aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server connected_connectionsUnit: noneType: rawBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server connected_connectionsUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_connections","title":"svm_ontaps3_svm_connections","text":"

      Total number of object store server connections. svm_ontaps3_svm_connections is ontaps3_svm_connections aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server connectionsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server connectionsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_create_bucket_failed","title":"svm_ontaps3_svm_create_bucket_failed","text":"

      Number of failed Create Bucket operations. svm_ontaps3_svm_create_bucket_failed is ontaps3_svm_create_bucket_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_create_bucket_failed_client_close","title":"svm_ontaps3_svm_create_bucket_failed_client_close","text":"

Number of times a Create Bucket operation failed because the client terminated the connection while the operation was still pending on the server. svm_ontaps3_svm_create_bucket_failed_client_close is ontaps3_svm_create_bucket_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_create_bucket_latency","title":"svm_ontaps3_svm_create_bucket_latency","text":"

      Average latency for Create Bucket operations. svm_ontaps3_svm_create_bucket_latency is ontaps3_svm_create_bucket_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_latencyUnit: microsecType: averageBase: create_bucket_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_latencyUnit: microsecType: average,no-zero-valuesBase: create_bucket_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_create_bucket_rate","title":"svm_ontaps3_svm_create_bucket_rate","text":"

      Number of Create Bucket operations per second. svm_ontaps3_svm_create_bucket_rate is ontaps3_svm_create_bucket_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_create_bucket_total","title":"svm_ontaps3_svm_create_bucket_total","text":"

      Number of Create Bucket operations. svm_ontaps3_svm_create_bucket_total is ontaps3_svm_create_bucket_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server create_bucket_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server create_bucket_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_default_deny_access","title":"svm_ontaps3_svm_default_deny_access","text":"

      Number of times access was denied by default and not through any policy statement. svm_ontaps3_svm_default_deny_access is ontaps3_svm_default_deny_access aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server default_deny_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server default_deny_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_bucket_failed","title":"svm_ontaps3_svm_delete_bucket_failed","text":"

      Number of failed Delete Bucket operations. svm_ontaps3_svm_delete_bucket_failed is ontaps3_svm_delete_bucket_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_bucket_failed_client_close","title":"svm_ontaps3_svm_delete_bucket_failed_client_close","text":"

Number of times a Delete Bucket operation failed because the client terminated the connection while the operation was still pending on the server. svm_ontaps3_svm_delete_bucket_failed_client_close is ontaps3_svm_delete_bucket_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_bucket_latency","title":"svm_ontaps3_svm_delete_bucket_latency","text":"

      Average latency for Delete Bucket operations. svm_ontaps3_svm_delete_bucket_latency is ontaps3_svm_delete_bucket_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_latencyUnit: microsecType: averageBase: delete_bucket_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_latencyUnit: microsecType: average,no-zero-valuesBase: delete_bucket_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_bucket_rate","title":"svm_ontaps3_svm_delete_bucket_rate","text":"

      Number of Delete Bucket operations per second. svm_ontaps3_svm_delete_bucket_rate is ontaps3_svm_delete_bucket_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_bucket_total","title":"svm_ontaps3_svm_delete_bucket_total","text":"

      Number of Delete Bucket operations. svm_ontaps3_svm_delete_bucket_total is ontaps3_svm_delete_bucket_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_bucket_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_bucket_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_object_failed","title":"svm_ontaps3_svm_delete_object_failed","text":"

      Number of failed DELETE object operations. svm_ontaps3_svm_delete_object_failed is ontaps3_svm_delete_object_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_object_failed_client_close","title":"svm_ontaps3_svm_delete_object_failed_client_close","text":"

Number of times a DELETE object operation failed because the client closed the connection while the operation was still pending on the server. svm_ontaps3_svm_delete_object_failed_client_close is ontaps3_svm_delete_object_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_object_latency","title":"svm_ontaps3_svm_delete_object_latency","text":"

      Average latency for DELETE object operations. svm_ontaps3_svm_delete_object_latency is ontaps3_svm_delete_object_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_latencyUnit: microsecType: averageBase: delete_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_latencyUnit: microsecType: average,no-zero-valuesBase: delete_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_object_rate","title":"svm_ontaps3_svm_delete_object_rate","text":"

      Number of DELETE object operations per second. svm_ontaps3_svm_delete_object_rate is ontaps3_svm_delete_object_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_object_tagging_failed","title":"svm_ontaps3_svm_delete_object_tagging_failed","text":"

      Number of failed DELETE object tagging operations. svm_ontaps3_svm_delete_object_tagging_failed is ontaps3_svm_delete_object_tagging_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_object_tagging_failed_client_close","title":"svm_ontaps3_svm_delete_object_tagging_failed_client_close","text":"

      Number of times a DELETE object tagging operation failed because the client terminated the connection while the operation was still pending on the server. svm_ontaps3_svm_delete_object_tagging_failed_client_close is ontaps3_svm_delete_object_tagging_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_object_tagging_latency","title":"svm_ontaps3_svm_delete_object_tagging_latency","text":"

      Average latency for DELETE object tagging operations. svm_ontaps3_svm_delete_object_tagging_latency is ontaps3_svm_delete_object_tagging_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_latencyUnit: microsecType: averageBase: delete_object_tagging_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_latencyUnit: microsecType: average,no-zero-valuesBase: delete_object_tagging_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_object_tagging_rate","title":"svm_ontaps3_svm_delete_object_tagging_rate","text":"

      Number of DELETE object tagging operations per second. svm_ontaps3_svm_delete_object_tagging_rate is ontaps3_svm_delete_object_tagging_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_object_tagging_total","title":"svm_ontaps3_svm_delete_object_tagging_total","text":"

      Number of DELETE object tagging operations. svm_ontaps3_svm_delete_object_tagging_total is ontaps3_svm_delete_object_tagging_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_tagging_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_tagging_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_delete_object_total","title":"svm_ontaps3_svm_delete_object_total","text":"

      Number of DELETE object operations. svm_ontaps3_svm_delete_object_total is ontaps3_svm_delete_object_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server delete_object_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server delete_object_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_explicit_deny_access","title":"svm_ontaps3_svm_explicit_deny_access","text":"

      Number of times access was denied explicitly by a policy statement. svm_ontaps3_svm_explicit_deny_access is ontaps3_svm_explicit_deny_access aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server explicit_deny_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server explicit_deny_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_bucket_acl_failed","title":"svm_ontaps3_svm_get_bucket_acl_failed","text":"

      Number of failed GET Bucket ACL operations. svm_ontaps3_svm_get_bucket_acl_failed is ontaps3_svm_get_bucket_acl_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_bucket_acl_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_bucket_acl_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_bucket_acl_total","title":"svm_ontaps3_svm_get_bucket_acl_total","text":"

      Number of GET Bucket ACL operations. svm_ontaps3_svm_get_bucket_acl_total is ontaps3_svm_get_bucket_acl_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_bucket_acl_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_bucket_acl_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_bucket_versioning_failed","title":"svm_ontaps3_svm_get_bucket_versioning_failed","text":"

      Number of failed Get Bucket Versioning operations. svm_ontaps3_svm_get_bucket_versioning_failed is ontaps3_svm_get_bucket_versioning_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_bucket_versioning_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_bucket_versioning_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_bucket_versioning_total","title":"svm_ontaps3_svm_get_bucket_versioning_total","text":"

      Number of Get Bucket Versioning operations. svm_ontaps3_svm_get_bucket_versioning_total is ontaps3_svm_get_bucket_versioning_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_bucket_versioning_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_bucket_versioning_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_data","title":"svm_ontaps3_svm_get_data","text":"

      Rate of GET object data transfers per second. svm_ontaps3_svm_get_data is ontaps3_svm_get_data aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_dataUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_object_acl_failed","title":"svm_ontaps3_svm_get_object_acl_failed","text":"

      Number of failed GET Object ACL operations. svm_ontaps3_svm_get_object_acl_failed is ontaps3_svm_get_object_acl_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_acl_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_acl_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_object_acl_total","title":"svm_ontaps3_svm_get_object_acl_total","text":"

      Number of GET Object ACL operations. svm_ontaps3_svm_get_object_acl_total is ontaps3_svm_get_object_acl_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_acl_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_acl_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_object_failed","title":"svm_ontaps3_svm_get_object_failed","text":"

      Number of failed GET object operations. svm_ontaps3_svm_get_object_failed is ontaps3_svm_get_object_failed aggregated by svm.
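      The *_failed counters are most useful alongside their *_total counterparts. Assuming these metrics are scraped into a Prometheus server (the server URL and the svm label below are assumptions; only the metric names come from this page), a per-SVM failure ratio could be fetched roughly as sketched below.

      ```python
      # Hypothetical sketch: ask a Prometheus server that scrapes these Harvest
      # metrics for the GET-object failure ratio per SVM. The server URL, the
      # svm label, and the presence of these series are assumptions.
      import requests  # third-party dependency, assumed to be installed

      PROMETHEUS = "http://prometheus.example.com:9090"  # placeholder address
      query = (
          "sum by (svm) (svm_ontaps3_svm_get_object_failed) / "
          "sum by (svm) (svm_ontaps3_svm_get_object_total)"
      )

      resp = requests.get(f"{PROMETHEUS}/api/v1/query", params={"query": query}, timeout=10)
      resp.raise_for_status()
      for result in resp.json()["data"]["result"]:
          print(result["metric"].get("svm", "?"), result["value"][1])
      ```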

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_object_failed_client_close","title":"svm_ontaps3_svm_get_object_failed_client_close","text":"

      Number of times a GET object operation failed because the client closed the connection while the operation was still pending on the server. svm_ontaps3_svm_get_object_failed_client_close is ontaps3_svm_get_object_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_object_lastbyte_latency","title":"svm_ontaps3_svm_get_object_lastbyte_latency","text":"

      Average last-byte latency for GET object operations. svm_ontaps3_svm_get_object_lastbyte_latency is ontaps3_svm_get_object_lastbyte_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_lastbyte_latencyUnit: microsecType: averageBase: get_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_lastbyte_latencyUnit: microsecType: average,no-zero-valuesBase: get_object_lastbyte_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_object_latency","title":"svm_ontaps3_svm_get_object_latency","text":"

      Average first-byte latency for GET object operations. svm_ontaps3_svm_get_object_latency is ontaps3_svm_get_object_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_latencyUnit: microsecType: averageBase: get_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_latencyUnit: microsecType: average,no-zero-valuesBase: get_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_object_rate","title":"svm_ontaps3_svm_get_object_rate","text":"

      Number of GET object operations per second. svm_ontaps3_svm_get_object_rate is ontaps3_svm_get_object_rate aggregated by svm.
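      This counter is of type rate: a per-second value computed from the delta of a raw cumulative counter across one poll interval. The sketch below only illustrates that calculation with made-up numbers; Harvest computes it automatically from the listed templates.

      ```python
      # Minimal sketch of a "rate" counter: operations per second derived from
      # two raw samples taken poll_interval_s seconds apart. Values are made up.
      def ops_per_second(prev_total: int, curr_total: int, poll_interval_s: float) -> float:
          return max(curr_total - prev_total, 0) / poll_interval_s

      print(ops_per_second(10_000, 10_600, 60))  # -> 10.0
      ```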

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_object_tagging_failed","title":"svm_ontaps3_svm_get_object_tagging_failed","text":"

      Number of failed GET object tagging operations. svm_ontaps3_svm_get_object_tagging_failed is ontaps3_svm_get_object_tagging_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_object_tagging_failed_client_close","title":"svm_ontaps3_svm_get_object_tagging_failed_client_close","text":"

      Number of times a GET object tagging operation failed because the client closed the connection while the operation was still pending on the server. svm_ontaps3_svm_get_object_tagging_failed_client_close is ontaps3_svm_get_object_tagging_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_object_tagging_latency","title":"svm_ontaps3_svm_get_object_tagging_latency","text":"

      Average latency for GET object tagging operations. svm_ontaps3_svm_get_object_tagging_latency is ontaps3_svm_get_object_tagging_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_latencyUnit: microsecType: averageBase: get_object_tagging_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_latencyUnit: microsecType: average,no-zero-valuesBase: get_object_tagging_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_object_tagging_rate","title":"svm_ontaps3_svm_get_object_tagging_rate","text":"

      Number of GET object tagging operations per second. svm_ontaps3_svm_get_object_tagging_rate is ontaps3_svm_get_object_tagging_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_object_tagging_total","title":"svm_ontaps3_svm_get_object_tagging_total","text":"

      Number of GET object tagging operations. svm_ontaps3_svm_get_object_tagging_total is ontaps3_svm_get_object_tagging_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_tagging_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_tagging_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_get_object_total","title":"svm_ontaps3_svm_get_object_total","text":"

      Number of GET object operations. svm_ontaps3_svm_get_object_total is ontaps3_svm_get_object_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server get_object_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server get_object_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_group_policy_evaluated","title":"svm_ontaps3_svm_group_policy_evaluated","text":"

      Number of times group policies were evaluated. svm_ontaps3_svm_group_policy_evaluated is ontaps3_svm_group_policy_evaluated aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server group_policy_evaluatedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server group_policy_evaluatedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_head_bucket_failed","title":"svm_ontaps3_svm_head_bucket_failed","text":"

      Number of failed HEAD bucket operations. svm_ontaps3_svm_head_bucket_failed is ontaps3_svm_head_bucket_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_head_bucket_failed_client_close","title":"svm_ontaps3_svm_head_bucket_failed_client_close","text":"

      Number of times a HEAD bucket operation failed because the client closed the connection while the operation was still pending on the server. svm_ontaps3_svm_head_bucket_failed_client_close is ontaps3_svm_head_bucket_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_head_bucket_latency","title":"svm_ontaps3_svm_head_bucket_latency","text":"

      Average latency for HEAD bucket operations. svm_ontaps3_svm_head_bucket_latency is ontaps3_svm_head_bucket_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_latencyUnit: microsecType: averageBase: head_bucket_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_latencyUnit: microsecType: average,no-zero-valuesBase: head_bucket_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_head_bucket_rate","title":"svm_ontaps3_svm_head_bucket_rate","text":"

      Number of HEAD bucket operations per second. svm_ontaps3_svm_head_bucket_rate is ontaps3_svm_head_bucket_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_head_bucket_total","title":"svm_ontaps3_svm_head_bucket_total","text":"

      Number of HEAD bucket operations. svm_ontaps3_svm_head_bucket_total is ontaps3_svm_head_bucket_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_bucket_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_bucket_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_head_object_failed","title":"svm_ontaps3_svm_head_object_failed","text":"

      Number of failed HEAD Object operations. svm_ontaps3_svm_head_object_failed is ontaps3_svm_head_object_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_head_object_failed_client_close","title":"svm_ontaps3_svm_head_object_failed_client_close","text":"

      Number of times a HEAD object operation failed because the client closed the connection while the operation was still pending on the server. svm_ontaps3_svm_head_object_failed_client_close is ontaps3_svm_head_object_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_head_object_latency","title":"svm_ontaps3_svm_head_object_latency","text":"

      Average latency for HEAD object operations. svm_ontaps3_svm_head_object_latency is ontaps3_svm_head_object_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_latencyUnit: microsecType: averageBase: head_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_latencyUnit: microsecType: average,no-zero-valuesBase: head_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_head_object_rate","title":"svm_ontaps3_svm_head_object_rate","text":"

      Number of HEAD Object operations per second. svm_ontaps3_svm_head_object_rate is ontaps3_svm_head_object_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_head_object_total","title":"svm_ontaps3_svm_head_object_total","text":"

      Number of HEAD Object operations. svm_ontaps3_svm_head_object_total is ontaps3_svm_head_object_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server head_object_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server head_object_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_initiate_multipart_upload_failed","title":"svm_ontaps3_svm_initiate_multipart_upload_failed","text":"

      Number of failed Initiate Multipart Upload operations. svm_ontaps3_svm_initiate_multipart_upload_failed is ontaps3_svm_initiate_multipart_upload_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_initiate_multipart_upload_failed_client_close","title":"svm_ontaps3_svm_initiate_multipart_upload_failed_client_close","text":"

      Number of times an Initiate Multipart Upload operation failed because the client terminated the connection while the operation was still pending on the server. svm_ontaps3_svm_initiate_multipart_upload_failed_client_close is ontaps3_svm_initiate_multipart_upload_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_initiate_multipart_upload_latency","title":"svm_ontaps3_svm_initiate_multipart_upload_latency","text":"

      Average latency for Initiate Multipart Upload operations. svm_ontaps3_svm_initiate_multipart_upload_latency is ontaps3_svm_initiate_multipart_upload_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_latencyUnit: microsecType: averageBase: initiate_multipart_upload_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_latencyUnit: microsecType: average,no-zero-valuesBase: initiate_multipart_upload_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_initiate_multipart_upload_rate","title":"svm_ontaps3_svm_initiate_multipart_upload_rate","text":"

      Number of Initiate Multipart Upload operations per second. svm_ontaps3_svm_initiate_multipart_upload_rate is ontaps3_svm_initiate_multipart_upload_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_initiate_multipart_upload_total","title":"svm_ontaps3_svm_initiate_multipart_upload_total","text":"

      Number of Initiate Multipart Upload operations. svm_ontaps3_svm_initiate_multipart_upload_total is ontaps3_svm_initiate_multipart_upload_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server initiate_multipart_upload_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server initiate_multipart_upload_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_input_flow_control_entry","title":"svm_ontaps3_svm_input_flow_control_entry","text":"

      Number of times input flow control was entered. svm_ontaps3_svm_input_flow_control_entry is ontaps3_svm_input_flow_control_entry aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server input_flow_control_entryUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server input_flow_control_entryUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_input_flow_control_exit","title":"svm_ontaps3_svm_input_flow_control_exit","text":"

      Number of times input flow control was exited. svm_ontaps3_svm_input_flow_control_exit is ontaps3_svm_input_flow_control_exit aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server input_flow_control_exitUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server input_flow_control_exitUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_buckets_failed","title":"svm_ontaps3_svm_list_buckets_failed","text":"

      Number of failed LIST Buckets operations. svm_ontaps3_svm_list_buckets_failed is ontaps3_svm_list_buckets_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_buckets_failed_client_close","title":"svm_ontaps3_svm_list_buckets_failed_client_close","text":"

      Number of times a LIST Buckets operation failed because the client closed the connection while the operation was still pending on the server. svm_ontaps3_svm_list_buckets_failed_client_close is ontaps3_svm_list_buckets_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_buckets_latency","title":"svm_ontaps3_svm_list_buckets_latency","text":"

      Average latency for LIST Buckets operations. svm_ontaps3_svm_list_buckets_latency is ontaps3_svm_list_buckets_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_latencyUnit: microsecType: averageBase: list_buckets_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_latencyUnit: microsecType: average,no-zero-valuesBase: head_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_buckets_rate","title":"svm_ontaps3_svm_list_buckets_rate","text":"

      Number of LIST Buckets operations per second. svm_ontaps3_svm_list_buckets_rate is ontaps3_svm_list_buckets_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_buckets_total","title":"svm_ontaps3_svm_list_buckets_total","text":"

      Number of LIST Buckets operations. svm_ontaps3_svm_list_buckets_total is ontaps3_svm_list_buckets_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_buckets_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_buckets_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_object_versions_failed","title":"svm_ontaps3_svm_list_object_versions_failed","text":"

      Number of failed LIST object versions operations. svm_ontaps3_svm_list_object_versions_failed is ontaps3_svm_list_object_versions_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_object_versions_failed_client_close","title":"svm_ontaps3_svm_list_object_versions_failed_client_close","text":"

      Number of times a LIST object versions operation failed because the client closed the connection while the operation was still pending on the server. svm_ontaps3_svm_list_object_versions_failed_client_close is ontaps3_svm_list_object_versions_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_object_versions_latency","title":"svm_ontaps3_svm_list_object_versions_latency","text":"

      Average latency for LIST Object versions operations. svm_ontaps3_svm_list_object_versions_latency is ontaps3_svm_list_object_versions_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_latencyUnit: microsecType: averageBase: list_object_versions_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_latencyUnit: microsecType: average,no-zero-valuesBase: list_object_versions_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_object_versions_rate","title":"svm_ontaps3_svm_list_object_versions_rate","text":"

      Number of LIST Object Versions operations per second. svm_ontaps3_svm_list_object_versions_rate is ontaps3_svm_list_object_versions_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_object_versions_total","title":"svm_ontaps3_svm_list_object_versions_total","text":"

      Number of LIST Object Versions operations. svm_ontaps3_svm_list_object_versions_total is ontaps3_svm_list_object_versions_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_object_versions_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_object_versions_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_objects_failed","title":"svm_ontaps3_svm_list_objects_failed","text":"

      Number of failed LIST objects operations. svm_ontaps3_svm_list_objects_failed is ontaps3_svm_list_objects_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_objects_failed_client_close","title":"svm_ontaps3_svm_list_objects_failed_client_close","text":"

      Number of times a LIST objects operation failed because the client closed the connection while the operation was still pending on the server. svm_ontaps3_svm_list_objects_failed_client_close is ontaps3_svm_list_objects_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_objects_latency","title":"svm_ontaps3_svm_list_objects_latency","text":"

      Average latency for LIST Objects operations. svm_ontaps3_svm_list_objects_latency is ontaps3_svm_list_objects_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_latencyUnit: microsecType: averageBase: list_objects_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_latencyUnit: microsecType: average,no-zero-valuesBase: list_objects_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_objects_rate","title":"svm_ontaps3_svm_list_objects_rate","text":"

      Number of LIST Objects operations per second. svm_ontaps3_svm_list_objects_rate is ontaps3_svm_list_objects_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_objects_total","title":"svm_ontaps3_svm_list_objects_total","text":"

      Number of LIST Objects operations. svm_ontaps3_svm_list_objects_total is ontaps3_svm_list_objects_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_objects_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_objects_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_uploads_failed","title":"svm_ontaps3_svm_list_uploads_failed","text":"

      Number of failed LIST Upload operations. svm_ontaps3_svm_list_uploads_failed is ontaps3_svm_list_uploads_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_uploads_failed_client_close","title":"svm_ontaps3_svm_list_uploads_failed_client_close","text":"

      Number of times a LIST Upload operation failed because the client closed the connection while the operation was still pending on the server. svm_ontaps3_svm_list_uploads_failed_client_close is ontaps3_svm_list_uploads_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_uploads_latency","title":"svm_ontaps3_svm_list_uploads_latency","text":"

      Average latency for LIST Upload operations. svm_ontaps3_svm_list_uploads_latency is ontaps3_svm_list_uploads_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_latencyUnit: microsecType: averageBase: list_uploads_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_latencyUnit: microsecType: average,no-zero-valuesBase: list_uploads_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_uploads_rate","title":"svm_ontaps3_svm_list_uploads_rate","text":"

      Number of LIST Upload operations per second. svm_ontaps3_svm_list_uploads_rate is ontaps3_svm_list_uploads_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_list_uploads_total","title":"svm_ontaps3_svm_list_uploads_total","text":"

      Number of LIST Upload operations. svm_ontaps3_svm_list_uploads_total is ontaps3_svm_list_uploads_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server list_uploads_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server list_uploads_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_max_cmds_per_connection","title":"svm_ontaps3_svm_max_cmds_per_connection","text":"

      Maximum number of commands pipelined at any instant on a connection. svm_ontaps3_svm_max_cmds_per_connection is ontaps3_svm_max_cmds_per_connection aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server maximum_commands_per_connectionUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server max_cmds_per_connectionUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_max_connected_connections","title":"svm_ontaps3_svm_max_connected_connections","text":"

      Maximum number of object store server connections established at one time. svm_ontaps3_svm_max_connected_connections is ontaps3_svm_max_connected_connections aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server maximum_connected_connectionsUnit: noneType: rawBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server max_connected_connectionsUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_max_requests_outstanding","title":"svm_ontaps3_svm_max_requests_outstanding","text":"

      Maximum number of object store server requests in process at one time. svm_ontaps3_svm_max_requests_outstanding is ontaps3_svm_max_requests_outstanding aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server maximum_requests_outstandingUnit: noneType: rawBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server max_requests_outstandingUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_multi_delete_reqs","title":"svm_ontaps3_svm_multi_delete_reqs","text":"

      Total number of object store server multiple object delete requests. svm_ontaps3_svm_multi_delete_reqs is ontaps3_svm_multi_delete_reqs aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server multiple_delete_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server multi_delete_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_output_flow_control_entry","title":"svm_ontaps3_svm_output_flow_control_entry","text":"

      Number of times output flow control was entered. svm_ontaps3_svm_output_flow_control_entry is ontaps3_svm_output_flow_control_entry aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server output_flow_control_entryUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server output_flow_control_entryUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_output_flow_control_exit","title":"svm_ontaps3_svm_output_flow_control_exit","text":"

      Number of times output flow control was exited. svm_ontaps3_svm_output_flow_control_exit is ontaps3_svm_output_flow_control_exit aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server output_flow_control_exitUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server output_flow_control_exitUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_presigned_url_reqs","title":"svm_ontaps3_svm_presigned_url_reqs","text":"

      Total number of presigned object store server URL requests. svm_ontaps3_svm_presigned_url_reqs is ontaps3_svm_presigned_url_reqs aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server presigned_url_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server presigned_url_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_put_bucket_versioning_failed","title":"svm_ontaps3_svm_put_bucket_versioning_failed","text":"

      Number of failed Put Bucket Versioning operations. svm_ontaps3_svm_put_bucket_versioning_failed is ontaps3_svm_put_bucket_versioning_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_bucket_versioning_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_bucket_versioning_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_put_bucket_versioning_total","title":"svm_ontaps3_svm_put_bucket_versioning_total","text":"

      Number of Put Bucket Versioning operations. svm_ontaps3_svm_put_bucket_versioning_total is ontaps3_svm_put_bucket_versioning_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_bucket_versioning_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_bucket_versioning_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_put_data","title":"svm_ontaps3_svm_put_data","text":"

      Rate of PUT object data transfers per second. svm_ontaps3_svm_put_data is ontaps3_svm_put_data aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_dataUnit: b_per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_dataUnit: b_per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_put_object_failed","title":"svm_ontaps3_svm_put_object_failed","text":"

      Number of failed PUT object operations. svm_ontaps3_svm_put_object_failed is ontaps3_svm_put_object_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_put_object_failed_client_close","title":"svm_ontaps3_svm_put_object_failed_client_close","text":"

      Number of times a PUT object operation failed because the client closed the connection while the operation was still pending on the server. svm_ontaps3_svm_put_object_failed_client_close is ontaps3_svm_put_object_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_put_object_latency","title":"svm_ontaps3_svm_put_object_latency","text":"

      Average latency for PUT object operations. svm_ontaps3_svm_put_object_latency is ontaps3_svm_put_object_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_latencyUnit: microsecType: averageBase: put_object_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_latencyUnit: microsecType: average,no-zero-valuesBase: put_object_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_put_object_rate","title":"svm_ontaps3_svm_put_object_rate","text":"

      Number of PUT object operations per second. svm_ontaps3_svm_put_object_rate is ontaps3_svm_put_object_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_put_object_tagging_failed","title":"svm_ontaps3_svm_put_object_tagging_failed","text":"

      Number of failed PUT object tagging operations. svm_ontaps3_svm_put_object_tagging_failed is ontaps3_svm_put_object_tagging_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_put_object_tagging_failed_client_close","title":"svm_ontaps3_svm_put_object_tagging_failed_client_close","text":"

      Number of times a PUT object tagging operation failed because the client terminated the connection while the operation was still pending on the server. svm_ontaps3_svm_put_object_tagging_failed_client_close is ontaps3_svm_put_object_tagging_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_put_object_tagging_latency","title":"svm_ontaps3_svm_put_object_tagging_latency","text":"

      Average latency for PUT object tagging operations. svm_ontaps3_svm_put_object_tagging_latency is ontaps3_svm_put_object_tagging_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_latencyUnit: microsecType: averageBase: put_object_tagging_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_latencyUnit: microsecType: average,no-zero-valuesBase: put_object_tagging_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_put_object_tagging_rate","title":"svm_ontaps3_svm_put_object_tagging_rate","text":"

      Number of PUT object tagging operations per second. svm_ontaps3_svm_put_object_tagging_rate is ontaps3_svm_put_object_tagging_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_put_object_tagging_total","title":"svm_ontaps3_svm_put_object_tagging_total","text":"

      Number of PUT object tagging operations. svm_ontaps3_svm_put_object_tagging_total is ontaps3_svm_put_object_tagging_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_tagging_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_tagging_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_put_object_total","title":"svm_ontaps3_svm_put_object_total","text":"

      Number of PUT object operations. svm_ontaps3_svm_put_object_total is ontaps3_svm_put_object_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server put_object_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server put_object_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_request_parse_errors","title":"svm_ontaps3_svm_request_parse_errors","text":"

      Number of request parser errors due to malformed requests. svm_ontaps3_svm_request_parse_errors is ontaps3_svm_request_parse_errors aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server request_parse_errorsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server request_parse_errorsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_requests","title":"svm_ontaps3_svm_requests","text":"

      Total number of object store server requests. svm_ontaps3_svm_requests is ontaps3_svm_requests aggregated by svm.
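      Every REST row on this page points at the same counter table, api/cluster/counter/tables/object_store_server. If you need to inspect the raw table outside of Harvest, a rough sketch is shown below; the cluster address, credentials, and TLS handling are placeholders, and in normal operation the listed Harvest template does this collection for you.

      ```python
      # Hypothetical sketch: fetch the raw object_store_server counter table from
      # the ONTAP REST endpoint listed on this page. Host, credentials, and the
      # disabled certificate verification are placeholders, not recommendations.
      import requests  # third-party dependency, assumed to be installed
      from requests.auth import HTTPBasicAuth

      CLUSTER = "https://cluster.example.com"      # placeholder cluster address
      AUTH = HTTPBasicAuth("admin", "password")    # placeholder credentials

      url = f"{CLUSTER}/api/cluster/counter/tables/object_store_server"
      resp = requests.get(url, auth=AUTH, verify=False, timeout=30)
      resp.raise_for_status()
      table = resp.json()
      print(table.get("name"), "-", table.get("description", ""))
      ```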

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server requestsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_requests_outstanding","title":"svm_ontaps3_svm_requests_outstanding","text":"

      Number of object store server requests in process. svm_ontaps3_svm_requests_outstanding is ontaps3_svm_requests_outstanding aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server requests_outstandingUnit: noneType: rawBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server requests_outstandingUnit: noneType: raw,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_root_user_access","title":"svm_ontaps3_svm_root_user_access","text":"

      Number of times access was performed by the root user. svm_ontaps3_svm_root_user_access is ontaps3_svm_root_user_access aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server root_user_accessUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server root_user_accessUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_server_connection_close","title":"svm_ontaps3_svm_server_connection_close","text":"

      Number of connection closures initiated by the server due to fatal errors. svm_ontaps3_svm_server_connection_close is ontaps3_svm_server_connection_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server server_connection_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server server_connection_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_signature_v2_reqs","title":"svm_ontaps3_svm_signature_v2_reqs","text":"

      Total number of object store server signature V2 requests. svm_ontaps3_svm_signature_v2_reqs is ontaps3_svm_signature_v2_reqs aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server signature_v2_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server signature_v2_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_signature_v4_reqs","title":"svm_ontaps3_svm_signature_v4_reqs","text":"

      Total number of object store server signature V4 requests. svm_ontaps3_svm_signature_v4_reqs is ontaps3_svm_signature_v4_reqs aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server signature_v4_requestsUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server signature_v4_reqsUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_tagging","title":"svm_ontaps3_svm_tagging","text":"

      Number of requests with tagging specified. svm_ontaps3_svm_tagging is ontaps3_svm_tagging aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server taggingUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server taggingUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_upload_part_failed","title":"svm_ontaps3_svm_upload_part_failed","text":"

      Number of failed Upload Part operations. svm_ontaps3_svm_upload_part_failed is ontaps3_svm_upload_part_failed aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_failedUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_failedUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_upload_part_failed_client_close","title":"svm_ontaps3_svm_upload_part_failed_client_close","text":"

      Number of times the Upload Part operation failed because the client terminated the connection while the operation was still pending on the server. svm_ontaps3_svm_upload_part_failed_client_close is ontaps3_svm_upload_part_failed_client_close aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_failed_client_closeUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_failed_client_closeUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_upload_part_latency","title":"svm_ontaps3_svm_upload_part_latency","text":"

      Average latency for Upload Part operations. svm_ontaps3_svm_upload_part_latency is ontaps3_svm_upload_part_latency aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_latencyUnit: microsecType: averageBase: upload_part_total conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_latencyUnit: microsecType: average,no-zero-valuesBase: upload_part_latency_base conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_upload_part_rate","title":"svm_ontaps3_svm_upload_part_rate","text":"

      Number of Upload Part operations per second. svm_ontaps3_svm_upload_part_rate is ontaps3_svm_upload_part_rate aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_rateUnit: per_secType: rateBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_rateUnit: per_secType: rate,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_ontaps3_svm_upload_part_total","title":"svm_ontaps3_svm_upload_part_total","text":"

      Number of Upload Part operations. svm_ontaps3_svm_upload_part_total is ontaps3_svm_upload_part_total aggregated by svm.

      API Endpoint Metric Template REST api/cluster/counter/tables/object_store_server upload_part_totalUnit: noneType: deltaBase: conf/restperf/9.14.1/ontap_s3_svm.yaml ZAPI perf-object-get-instances object_store_server upload_part_totalUnit: noneType: delta,no-zero-valuesBase: conf/zapiperf/cdot/9.8.0/ontap_s3_svm.yaml"},{"location":"ontap-metrics/#svm_vol_avg_latency","title":"svm_vol_avg_latency","text":"

      Average latency in microseconds for the WAFL filesystem to process all the operations on the volume; not including request processing or network communication time
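      Counters of type average, such as this one, are derived from two raw accumulators: the latency counter and its base (total_ops here). A hedged sketch of the usual delta-over-delta computation between two polls; the function and variable names are illustrative:

      ```python
      def average_latency_us(prev_latency, curr_latency, prev_ops, curr_ops):
          """Average per-operation latency over a poll interval.

          Both inputs are monotonically increasing accumulators, so the interval
          average is delta(latency) / delta(base ops).
          """
          delta_ops = curr_ops - prev_ops
          if delta_ops <= 0:
              return 0.0  # no operations completed in the interval
          return (curr_latency - prev_latency) / delta_ops

      # Example: 1,500,000 extra microseconds over 3,000 extra ops -> 500 us/op.
      print(average_latency_us(10_000_000, 11_500_000, 20_000, 23_000))
      ```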

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm average_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver avg_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_other_latency","title":"svm_vol_other_latency","text":"

      Average latency in microseconds for the WAFL filesystem to process other operations to the volume; not including request processing or network communication time

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm other_latencyUnit: microsecType: averageBase: total_other_ops conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_other_ops","title":"svm_vol_other_ops","text":"

      Number of other operations per second to the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm total_other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_read_data","title":"svm_vol_read_data","text":"

      Bytes read per second
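      Counters of type rate, such as this one, are computed from a monotonically increasing raw counter divided by the elapsed time between polls. A minimal sketch under that assumption (names are illustrative):

      ```python
      def per_second_rate(prev_value, curr_value, elapsed_seconds):
          """Convert a monotonically increasing byte counter into a bytes-per-second rate."""
          if elapsed_seconds <= 0:
              raise ValueError("elapsed_seconds must be positive")
          return (curr_value - prev_value) / elapsed_seconds

      # Example: 60 MiB read over a 60-second poll interval -> 1 MiB/s.
      print(per_second_rate(0, 60 * 1024 * 1024, 60))
      ```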

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm bytes_readUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_read_latency","title":"svm_vol_read_latency","text":"

      Average latency in microseconds for the WAFL filesystem to process read requests to the volume; not including request processing or network communication time

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm read_latencyUnit: microsecType: averageBase: total_read_ops conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_read_ops","title":"svm_vol_read_ops","text":"

      Number of read operations per second from the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm total_read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_total_ops","title":"svm_vol_total_ops","text":"

      Number of operations per second serviced by the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_write_data","title":"svm_vol_write_data","text":"

      Bytes written per second

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm bytes_writtenUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_write_latency","title":"svm_vol_write_latency","text":"

      Average latency in microseconds for the WAFL filesystem to process write requests to the volume; not including request processing or network communication time

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm write_latencyUnit: microsecType: averageBase: total_write_ops conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vol_write_ops","title":"svm_vol_write_ops","text":"

      Number of write operations per second to the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume:svm total_write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume_svm.yaml ZAPI perf-object-get-instances volume:vserver write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume_svm.yaml"},{"location":"ontap-metrics/#svm_vscan_connections_active","title":"svm_vscan_connections_active","text":"

      Total number of currently active connections

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_vscan connections_activeUnit: noneType: rawBase: conf/restperf/9.13.0/vscan_svm.yaml ZAPI perf-object-get-instances offbox_vscan connections_activeUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/vscan_svm.yaml"},{"location":"ontap-metrics/#svm_vscan_dispatch_latency","title":"svm_vscan_dispatch_latency","text":"

      Average dispatch latency

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_vscan dispatch.latencyUnit: microsecType: averageBase: dispatch.requests conf/restperf/9.13.0/vscan_svm.yaml ZAPI perf-object-get-instances offbox_vscan dispatch_latencyUnit: microsecType: averageBase: dispatch_latency_base conf/zapiperf/cdot/9.8.0/vscan_svm.yaml"},{"location":"ontap-metrics/#svm_vscan_scan_latency","title":"svm_vscan_scan_latency","text":"

      Average scan latency

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_vscan scan.latencyUnit: microsecType: averageBase: scan.requests conf/restperf/9.13.0/vscan_svm.yaml ZAPI perf-object-get-instances offbox_vscan scan_latencyUnit: microsecType: averageBase: scan_latency_base conf/zapiperf/cdot/9.8.0/vscan_svm.yaml"},{"location":"ontap-metrics/#svm_vscan_scan_noti_received_rate","title":"svm_vscan_scan_noti_received_rate","text":"

      Total number of scan notifications received by the dispatcher per second

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_vscan scan.notification_received_rateUnit: per_secType: rateBase: conf/restperf/9.13.0/vscan_svm.yaml ZAPI perf-object-get-instances offbox_vscan scan_noti_received_rateUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/vscan_svm.yaml"},{"location":"ontap-metrics/#svm_vscan_scan_request_dispatched_rate","title":"svm_vscan_scan_request_dispatched_rate","text":"

      Total number of scan requests sent to the Vscanner per second

      API Endpoint Metric Template REST api/cluster/counter/tables/svm_vscan scan.request_dispatched_rateUnit: per_secType: rateBase: conf/restperf/9.13.0/vscan_svm.yaml ZAPI perf-object-get-instances offbox_vscan scan_request_dispatched_rateUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/vscan_svm.yaml"},{"location":"ontap-metrics/#token_copy_bytes","title":"token_copy_bytes","text":"

      Total number of bytes copied.

      API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_copy.bytesUnit: noneType: rateBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_copy_bytesUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_copy_failure","title":"token_copy_failure","text":"

      Number of failed token copy requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_copy.failuresUnit: noneType: deltaBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_copy_failureUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_copy_success","title":"token_copy_success","text":"

      Number of successful token copy requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_copy.successesUnit: noneType: deltaBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_copy_successUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_create_bytes","title":"token_create_bytes","text":"

      Total number of bytes for which tokens are created.

      API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_create.bytesUnit: noneType: rateBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_create_bytesUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_create_failure","title":"token_create_failure","text":"

      Number of failed token create requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_create.failuresUnit: noneType: deltaBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_create_failureUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_create_success","title":"token_create_success","text":"

      Number of successful token create requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_create.successesUnit: noneType: deltaBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_create_successUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_zero_bytes","title":"token_zero_bytes","text":"

      Total number of bytes zeroed.

      API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_zero.bytesUnit: noneType: rateBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_zero_bytesUnit: noneType: rateBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_zero_failure","title":"token_zero_failure","text":"

      Number of failed token zero requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_zero.failuresUnit: noneType: deltaBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_zero_failureUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#token_zero_success","title":"token_zero_success","text":"

      Number of successful token zero requests.

      API Endpoint Metric Template REST api/cluster/counter/tables/token_manager token_zero.successesUnit: noneType: deltaBase: conf/restperf/9.12.0/token_manager.yaml ZAPI perf-object-get-instances token_manager token_zero_successUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/token_manager.yaml"},{"location":"ontap-metrics/#volume_autosize_grow_threshold_percent","title":"volume_autosize_grow_threshold_percent","text":"

      Used space threshold which triggers autogrow. When the size-used is greater than this percent of size-total, the volume will be grown. The computed value is rounded down. The default value of this element varies from 85% to 98%, depending on the volume size. It is an error for the grow threshold to be less than or equal to the shrink threshold.

      API Endpoint Metric Template REST api/private/cli/volume autosize_grow_threshold_percent conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-autosize-attributes.grow-threshold-percent conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_autosize_maximum_size","title":"volume_autosize_maximum_size","text":"

      The maximum size (in bytes) to which the volume would be grown automatically. The default value is 20% greater than the volume size. It is an error for the maximum volume size to be less than the current volume size. It is also an error for the maximum size to be less than or equal to the minimum size.

      API Endpoint Metric Template REST api/private/cli/volume max_autosize conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-autosize-attributes.maximum-size conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_avg_latency","title":"volume_avg_latency","text":"

      Average latency in microseconds for the WAFL filesystem to process all the operations on the volume; not including request processing or network communication time

      API Endpoint Metric Template REST api/cluster/counter/tables/volume average_latencyUnit: microsecType: averageBase: total_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume avg_latencyUnit: microsecType: averageBase: total_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_capacity_tier_footprint","title":"volume_capacity_tier_footprint","text":"API Endpoint Metric Template REST api/private/cli/volume/footprint volume_blocks_footprint_bin1 conf/rest/9.14.0/volume.yaml"},{"location":"ontap-metrics/#volume_capacity_tier_footprint_percent","title":"volume_capacity_tier_footprint_percent","text":"API Endpoint Metric Template REST api/private/cli/volume/footprint volume_blocks_footprint_bin1_percent conf/rest/9.14.0/volume.yaml"},{"location":"ontap-metrics/#volume_filesystem_size","title":"volume_filesystem_size","text":"

      Filesystem size (in bytes) of the volume. This is the total usable size of the volume, not including WAFL reserve. This value is the same as Size except for certain SnapMirror destination volumes. It is possible for destination volumes to have a different filesystem-size because the filesystem-size is sent across from the source volume. This field is valid only when the volume is online.

      API Endpoint Metric Template REST api/private/cli/volume filesystem_size conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.filesystem-size conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_inode_files_total","title":"volume_inode_files_total","text":"

      Total user-visible file (inode) count, i.e., the maximum number of user-visible files (inodes) that this volume can currently hold.

      API Endpoint Metric Template REST api/private/cli/volume files conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-inode-attributes.files-total conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_inode_files_used","title":"volume_inode_files_used","text":"

      Number of user-visible files (inodes) used. This field is valid only when the volume is online.

      API Endpoint Metric Template REST api/private/cli/volume files_used conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-inode-attributes.files-used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_inode_used_percent","title":"volume_inode_used_percent","text":"

      volume_inode_files_used / volume_inode_files_total
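      As the expression above indicates, this metric is the used-to-total inode ratio expressed as a percentage. A small sketch of the arithmetic (function name is illustrative):

      ```python
      def inode_used_percent(files_used, files_total):
          """Percentage of the volume's inode capacity that is in use."""
          if files_total == 0:
              return 0.0
          return files_used / files_total * 100

      print(inode_used_percent(12_000, 40_000))  # 30.0
      ```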

      API Endpoint Metric Template REST api/private/cli/volume inode_files_used, inode_files_total conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter inode_files_used, inode_files_total conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_access_latency","title":"volume_nfs_access_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol access requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.access_latencyUnit: microsecType: averageBase: nfs.access_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_access_latencyUnit: microsecType: averageBase: nfs_access_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_access_ops","title":"volume_nfs_access_ops","text":"

      Number of NFS accesses per second to the volume.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.access_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_access_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_getattr_latency","title":"volume_nfs_getattr_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol getattr requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.getattr_latencyUnit: microsecType: averageBase: nfs.getattr_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_getattr_latencyUnit: microsecType: averageBase: nfs_getattr_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_getattr_ops","title":"volume_nfs_getattr_ops","text":"

      Number of NFS getattr per second to the volume.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.getattr_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_getattr_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_lookup_latency","title":"volume_nfs_lookup_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol lookup requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.lookup_latencyUnit: microsecType: averageBase: nfs.lookup_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_lookup_latencyUnit: microsecType: averageBase: nfs_lookup_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_lookup_ops","title":"volume_nfs_lookup_ops","text":"

      Number of NFS lookups per second to the volume.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.lookup_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_lookup_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_other_latency","title":"volume_nfs_other_latency","text":"

      Average time for the WAFL filesystem to process other NFS operations to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.other_latencyUnit: microsecType: averageBase: nfs.other_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_other_latencyUnit: microsecType: averageBase: nfs_other_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_other_ops","title":"volume_nfs_other_ops","text":"

      Number of other NFS operations per second to the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_punch_hole_latency","title":"volume_nfs_punch_hole_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol hole-punch requests to the volume.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.punch_hole_latencyUnit: microsecType: averageBase: nfs.punch_hole_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_punch_hole_latencyUnit: microsecType: averageBase: nfs_punch_hole_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_punch_hole_ops","title":"volume_nfs_punch_hole_ops","text":"

      Number of NFS hole-punch requests per second to the volume.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.punch_hole_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_punch_hole_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_read_latency","title":"volume_nfs_read_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol read requests to the volume; not including NFS protocol request processing or network communication time which will also be included in client observed NFS request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.read_latencyUnit: microsecType: averageBase: nfs.read_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_read_latencyUnit: microsecType: averageBase: nfs_read_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_read_ops","title":"volume_nfs_read_ops","text":"

      Number of NFS read operations per second from the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_setattr_latency","title":"volume_nfs_setattr_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol setattr requests to the volume.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.setattr_latencyUnit: microsecType: averageBase: nfs.setattr_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_setattr_latencyUnit: microsecType: averageBase: nfs_setattr_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_setattr_ops","title":"volume_nfs_setattr_ops","text":"

      Number of NFS setattr requests per second to the volume.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.setattr_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_setattr_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_total_ops","title":"volume_nfs_total_ops","text":"

      Number of total NFS operations per second to the volume.

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_write_latency","title":"volume_nfs_write_latency","text":"

      Average time for the WAFL filesystem to process NFS protocol write requests to the volume; not including NFS protocol request processing or network communication time, which will also be included in client observed NFS request latency

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.write_latencyUnit: microsecType: averageBase: nfs.write_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_write_latencyUnit: microsecType: averageBase: nfs_write_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_nfs_write_ops","title":"volume_nfs_write_ops","text":"

      Number of NFS write operations per second to the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume nfs.write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume nfs_write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_num_compress_attempts","title":"volume_num_compress_attempts","text":"API Endpoint Metric Template REST api/private/cli/volume/efficiency/stat num_compress_attempts conf/rest/9.14.0/volume.yaml"},{"location":"ontap-metrics/#volume_num_compress_fail","title":"volume_num_compress_fail","text":"API Endpoint Metric Template REST api/private/cli/volume/efficiency/stat num_compress_fail conf/rest/9.14.0/volume.yaml"},{"location":"ontap-metrics/#volume_other_latency","title":"volume_other_latency","text":"

      Average latency in microseconds for the WAFL filesystem to process other operations to the volume; not including request processing or network communication time

      API Endpoint Metric Template REST api/cluster/counter/tables/volume other_latencyUnit: microsecType: averageBase: total_other_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume other_latencyUnit: microsecType: averageBase: other_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_other_ops","title":"volume_other_ops","text":"

      Number of other operations per second to the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume total_other_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume other_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_overwrite_reserve_available","title":"volume_overwrite_reserve_available","text":"

      Amount of storage space that is currently available for overwrites, calculated by subtracting the amount of overwrite reserve space that has already been used from the total overwrite reserve.
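      In other words, the metric is derived from the two template counters as total minus used. A small sketch of that arithmetic (function name is illustrative):

      ```python
      def overwrite_reserve_available(reserve_total, reserve_used):
          """Overwrite reserve still available: total reserve minus the part already used."""
          return max(reserve_total - reserve_used, 0)

      # Example: a 10 GiB reserve with 2 GiB used leaves 8 GiB available.
      print(overwrite_reserve_available(10 * 1024**3, 2 * 1024**3))
      ```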

      API Endpoint Metric Template REST api/private/cli/volume overwrite_reserve_total, overwrite_reserve_used conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter overwrite_reserve_total, overwrite_reserve_used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_overwrite_reserve_total","title":"volume_overwrite_reserve_total","text":"

      The size (in bytes) that is reserved for overwriting snapshotted data in an otherwise full volume. This space is usable only by space-reserved LUNs and files, and then only when the volume is full. This field is valid only when the volume is online.

      API Endpoint Metric Template REST api/private/cli/volume overwrite_reserve conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.overwrite-reserve conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_overwrite_reserve_used","title":"volume_overwrite_reserve_used","text":"

      The reserved size (in bytes) that is not available for new overwrites. The number includes both the reserved size which has actually been used for overwrites as well as the size which was never allocated in the first place. This field is valid only when the volume is online.

      API Endpoint Metric Template REST api/private/cli/volume overwrite_reserve_used conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.overwrite-reserve-used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_performance_tier_footprint","title":"volume_performance_tier_footprint","text":"API Endpoint Metric Template REST api/private/cli/volume/footprint volume_blocks_footprint_bin0 conf/rest/9.14.0/volume.yaml"},{"location":"ontap-metrics/#volume_performance_tier_footprint_percent","title":"volume_performance_tier_footprint_percent","text":"API Endpoint Metric Template REST api/private/cli/volume/footprint volume_blocks_footprint_bin0_percent conf/rest/9.14.0/volume.yaml"},{"location":"ontap-metrics/#volume_read_data","title":"volume_read_data","text":"

      Bytes read per second

      API Endpoint Metric Template REST api/cluster/counter/tables/volume bytes_readUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume read_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_read_latency","title":"volume_read_latency","text":"

      Average latency in microseconds for the WAFL filesystem to process read requests to the volume; not including request processing or network communication time

      API Endpoint Metric Template REST api/cluster/counter/tables/volume read_latencyUnit: microsecType: averageBase: total_read_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume read_latencyUnit: microsecType: averageBase: read_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_read_ops","title":"volume_read_ops","text":"

      Number of read operations per second from the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume total_read_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume read_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_sis_compress_saved","title":"volume_sis_compress_saved","text":"

      The total disk space (in bytes) that is saved by compressing blocks on the referenced file system.

      API Endpoint Metric Template REST api/private/cli/volume compression_space_saved conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-sis-attributes.compression-space-saved conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_sis_compress_saved_percent","title":"volume_sis_compress_saved_percent","text":"

      Percentage of the total disk space that is saved by compressing blocks on the referenced file system

      API Endpoint Metric Template REST api/private/cli/volume compression_space_saved_percent conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-sis-attributes.percentage-compression-space-saved conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_sis_dedup_saved","title":"volume_sis_dedup_saved","text":"

      The total disk space (in bytes) that is saved by deduplication and file cloning.

      API Endpoint Metric Template REST api/private/cli/volume dedupe_space_saved conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-sis-attributes.deduplication-space-saved conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_sis_dedup_saved_percent","title":"volume_sis_dedup_saved_percent","text":"

      Percentage of the total disk space that is saved by deduplication and file cloning.

      API Endpoint Metric Template REST api/private/cli/volume dedupe_space_saved_percent conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-sis-attributes.percentage-deduplication-space-saved conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_sis_total_saved","title":"volume_sis_total_saved","text":"

      Total space saved (in bytes) in the volume due to deduplication, compression, and file cloning.

      API Endpoint Metric Template REST api/private/cli/volume sis_space_saved conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-sis-attributes.total-space-saved conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_sis_total_saved_percent","title":"volume_sis_total_saved_percent","text":"

      Percentage of total disk space that is saved by compressing blocks, deduplication and file cloning.

      API Endpoint Metric Template REST api/private/cli/volume sis_space_saved_percent conf/rest/9.12.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-sis-attributes.percentage-total-space-saved conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_size","title":"volume_size","text":"

      Physical size of the volume, in bytes. The minimum size for a FlexVol volume is 20MB and the minimum size for a FlexGroup volume is 200MB per constituent. The recommended size for a FlexGroup volume is a minimum of 100GB per constituent. For all volumes, the default size is equal to the minimum size.

      API Endpoint Metric Template REST api/private/cli/volume size conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.size conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_size_available","title":"volume_size_available","text":"

      The size (in bytes) that is still available in the volume. This field is valid only when the volume is online.

      API Endpoint Metric Template REST api/private/cli/volume available conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.size-available conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_size_total","title":"volume_size_total","text":"

      Total usable size (in bytes) of the volume, not including WAFL reserve or volume snapshot reserve. If the volume is restricted or offline, a value of 0 is returned.

      API Endpoint Metric Template REST api/private/cli/volume total conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.size-total conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_size_used","title":"volume_size_used","text":"

      Number of bytes used in the volume. If the volume is restricted or offline, a value of 0 is returned.

      API Endpoint Metric Template REST api/private/cli/volume used conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.size-used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_size_used_percent","title":"volume_size_used_percent","text":"

      Percentage of utilized storage space in a volume relative to its total capacity.

      API Endpoint Metric Template REST api/private/cli/volume percent_used conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.percentage-size-used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshot_count","title":"volume_snapshot_count","text":"

      Number of Snapshot copies in the volume.

      API Endpoint Metric Template REST api/private/cli/volume snapshot_count conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-snapshot-attributes.snapshot-count conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshot_reserve_available","title":"volume_snapshot_reserve_available","text":"

      The size (in bytes) that is available for Snapshot copies inside the Snapshot reserve. This value is zero if Snapshot spill is present. For 'none' guaranteed volumes, this may get reduced due to less available space in the aggregate. This parameter is not supported on Infinite Volumes.

      API Endpoint Metric Template REST api/private/cli/volume snapshot_reserve_available conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.snapshot-reserve-available conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshot_reserve_percent","title":"volume_snapshot_reserve_percent","text":"

      The percentage of volume disk space that has been set aside as reserve for snapshot usage.

      API Endpoint Metric Template REST api/private/cli/volume percent_snapshot_space conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.percentage-snapshot-reserve conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshot_reserve_size","title":"volume_snapshot_reserve_size","text":"

      The size (in bytes) in the volume that has been set aside as reserve for snapshot usage.

      API Endpoint Metric Template REST api/private/cli/volume snapshot_reserve_size conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.snapshot-reserve-size conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshot_reserve_used","title":"volume_snapshot_reserve_used","text":"

      Amount of storage space currently used by a volume's snapshot reserve, which is calculated by subtracting the snapshot reserve available space from the snapshot reserve size.
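      Equivalently, the metric is the reserve size minus the reserve space still available, as sketched below (function name is illustrative):

      ```python
      def snapshot_reserve_used(reserve_size, reserve_available):
          """Snapshot reserve in use: reserve size minus the reserve space still available."""
          return max(reserve_size - reserve_available, 0)

      # Example: a 5 GiB reserve with 1 GiB still available has 4 GiB in use.
      print(snapshot_reserve_used(5 * 1024**3, 1 * 1024**3))
      ```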

      API Endpoint Metric Template REST api/private/cli/volume snapshot_reserve_size, snapshot_reserve_available conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter snapshot_reserve_size, snapshot_reserve_available conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshot_reserve_used_percent","title":"volume_snapshot_reserve_used_percent","text":"

      Percentage of the space reserved for snapshots in the volume that has been used. Note that in some scenarios it is possible to exceed 100% of the allocated space.

      API Endpoint Metric Template REST api/private/cli/volume snapshot_space_used conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.percentage-snapshot-reserve-used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshots_size_available","title":"volume_snapshots_size_available","text":"

      Total free space (in bytes) available in the volume and the snapshot reserve. If this value is 0 or negative, a new snapshot cannot be created.

      API Endpoint Metric Template REST api/private/cli/volume size_available_for_snapshots conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.size-available-for-snapshots conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_snapshots_size_used","title":"volume_snapshots_size_used","text":"

      The size (in bytes) that is used by snapshots in the volume.

      API Endpoint Metric Template REST api/private/cli/volume size_used_by_snapshots conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.size-used-by-snapshots conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_expected_available","title":"volume_space_expected_available","text":"

      The size (in bytes) that should be available for the volume irrespective of available size in the aggregate. This is the same as size-available for 'volume' guaranteed volumes. For 'none' guaranteed volumes this value is calculated as if the aggregate had enough backing disk space to fully support the volume's size. Similar to the size-available property, this does not include Snapshot reserve. This count gets reduced if snapshots consume space above the Snapshot reserve threshold. This parameter is not supported on Infinite Volumes.

      API Endpoint Metric Template REST api/private/cli/volume expected_available conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.expected-available conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_logical_available","title":"volume_space_logical_available","text":"

      The size (in bytes) that is logically available in the volume. This is the amount of free space available considering space saved by the storage efficiency features as being used. This does not include Snapshot reserve. This parameter is not supported on FlexGroups or Infinite Volumes.

      API Endpoint Metric Template REST api/private/cli/volume logical_available conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.logical-available conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_logical_used","title":"volume_space_logical_used","text":"

      The size (in bytes) that is logically used in the volume. This value includes all the space saved by the storage efficiency features along with the physically used space. This does not include Snapshot reserve but does consider Snapshot spill. This parameter is not supported on FlexGroups or Infinite Volumes.

      API Endpoint Metric Template REST api/private/cli/volume logical_used conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.logical-used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_logical_used_by_afs","title":"volume_space_logical_used_by_afs","text":"

      The size (in bytes) that is logically used by the active filesystem of the volume. This value differs from 'logical-used' by the amount of Snapshot spill that exceeds Snapshot reserve. This parameter is not supported on FlexGroups or Infinite Volumes.

      API Endpoint Metric Template REST api/private/cli/volume logical_used_by_afs conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.logical-used-by-afs conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_logical_used_by_snapshots","title":"volume_space_logical_used_by_snapshots","text":"

      The size (in bytes) that is logically used across all Snapshot copies in the volume. This value differs from 'size-used-by-snapshots' by the space saved by the storage efficiency features across the Snapshot copies. This parameter is not supported on FlexGroups or Infinite Volumes.

      API Endpoint Metric Template REST api/private/cli/volume logical_used_by_snapshots conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.logical-used-by-snapshots conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_logical_used_percent","title":"volume_space_logical_used_percent","text":"

      Percentage of the logical used size of the volume. This parameter is not supported on FlexGroups or Infinite Volumes.

      API Endpoint Metric Template REST api/private/cli/volume logical_used_percent conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.logical-used-percent conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_performance_tier_inactive_user_data","title":"volume_space_performance_tier_inactive_user_data","text":"

      The size that is physically used in the performance tier of the volume and has a cold temperature. This parameter is only supported if the volume is in an aggregate that is attached, or could be attached, to an object store.

      API Endpoint Metric Template REST api/private/cli/volume performance_tier_inactive_user_data conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.performance-tier-inactive-user-data conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_performance_tier_inactive_user_data_percent","title":"volume_space_performance_tier_inactive_user_data_percent","text":"

      The size (in percent) that is physically used in the performance tier of the volume and has a cold temperature. This parameter is only supported if the volume is in an aggregate that is attached, or could be attached, to an object store.

      API Endpoint Metric Template REST api/private/cli/volume performance_tier_inactive_user_data_percent conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.performance-tier-inactive-user-data-percent conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_physical_used","title":"volume_space_physical_used","text":"

      The size (in bytes) that is physically used in the volume. This differs from 'total-used' space by the space that is reserved for future writes. The value includes blocks in use by Snapshot copies. This field is valid only if the volume is online.

      API Endpoint Metric Template REST api/private/cli/volume virtual_used conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.physical-used conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_space_physical_used_percent","title":"volume_space_physical_used_percent","text":"

      The size (in percent) that is physically used in the volume. The percentage is based on volume size including the space reserved for Snapshot copies. This field is valid only if the volume is online.

      API Endpoint Metric Template REST api/private/cli/volume virtual_used_percent conf/rest/9.14.0/volume.yaml ZAPI volume-get-iter volume-attributes.volume-space-attributes.physical-used-percent conf/zapi/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_total_data","title":"volume_total_data","text":"

      This metric represents the total amount of data that has been read from and written to a specific volume.
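      Per the template below, the value is the sum of the read and write throughput counters. A trivial sketch of that sum (names are illustrative):

      ```python
      def total_data(read_bytes_per_sec, write_bytes_per_sec):
          """Combined read and write throughput for a volume, in bytes per second."""
          return read_bytes_per_sec + write_bytes_per_sec

      # Example: 75 MiB/s read + 25 MiB/s written -> 100 MiB/s total.
      print(total_data(75 * 1024**2, 25 * 1024**2))
      ```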

      API Endpoint Metric Template REST api/cluster/counter/tables/volume bytes_read, bytes_writtenUnit: Type: Base: conf/restperf/9.12.0/volume.yaml ZAPI volume read_data, write_dataUnit: Type: Base: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_total_ops","title":"volume_total_ops","text":"

      Number of operations per second serviced by the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume total_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume total_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_write_data","title":"volume_write_data","text":"

      Bytes written per second

      API Endpoint Metric Template REST api/cluster/counter/tables/volume bytes_writtenUnit: b_per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume write_dataUnit: b_per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_write_latency","title":"volume_write_latency","text":"

      Average latency in microseconds for the WAFL filesystem to process write requests to the volume; not including request processing or network communication time

      API Endpoint Metric Template REST api/cluster/counter/tables/volume write_latencyUnit: microsecType: averageBase: total_write_ops conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume write_latencyUnit: microsecType: averageBase: write_ops conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#volume_write_ops","title":"volume_write_ops","text":"

      Number of write operations per second to the volume

      API Endpoint Metric Template REST api/cluster/counter/tables/volume total_write_opsUnit: per_secType: rateBase: conf/restperf/9.12.0/volume.yaml ZAPI perf-object-get-instances volume write_opsUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/volume.yaml"},{"location":"ontap-metrics/#vscan_scan_latency","title":"vscan_scan_latency","text":"

      Average scan latency

      API Endpoint Metric Template REST api/cluster/counter/tables/vscan scan.latencyUnit: microsecType: averageBase: scan.requests conf/restperf/9.13.0/vscan.yaml ZAPI perf-object-get-instances offbox_vscan_server scan_latencyUnit: microsecType: averageBase: scan_latency_base conf/zapiperf/cdot/9.8.0/vscan.yaml"},{"location":"ontap-metrics/#vscan_scan_request_dispatched_rate","title":"vscan_scan_request_dispatched_rate","text":"

      Total number of scan requests sent to the scanner per second

      API Endpoint Metric Template REST api/cluster/counter/tables/vscan scan.request_dispatched_rateUnit: per_secType: rateBase: conf/restperf/9.13.0/vscan.yaml ZAPI perf-object-get-instances offbox_vscan_server scan_request_dispatched_rateUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/vscan.yaml"},{"location":"ontap-metrics/#vscan_scanner_stats_pct_cpu_used","title":"vscan_scanner_stats_pct_cpu_used","text":"

      Percentage CPU utilization on scanner calculated over the last 15 seconds.

      API Endpoint Metric Template REST api/cluster/counter/tables/vscan scanner.stats_percent_cpu_usedUnit: noneType: rawBase: conf/restperf/9.13.0/vscan.yaml ZAPI perf-object-get-instances offbox_vscan_server scanner_stats_pct_cpu_usedUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/vscan.yaml"},{"location":"ontap-metrics/#vscan_scanner_stats_pct_mem_used","title":"vscan_scanner_stats_pct_mem_used","text":"

      Percentage RAM utilization on scanner calculated over the last 15 seconds.

      API Endpoint Metric Template REST api/cluster/counter/tables/vscan scanner.stats_percent_mem_usedUnit: noneType: rawBase: conf/restperf/9.13.0/vscan.yaml ZAPI perf-object-get-instances offbox_vscan_server scanner_stats_pct_mem_usedUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/vscan.yaml"},{"location":"ontap-metrics/#vscan_scanner_stats_pct_network_used","title":"vscan_scanner_stats_pct_network_used","text":"

      Percentage network utilization on scanner calculated for the last 15 seconds.

      API Endpoint Metric Template REST api/cluster/counter/tables/vscan scanner.stats_percent_network_usedUnit: noneType: rawBase: conf/restperf/9.13.0/vscan.yaml ZAPI perf-object-get-instances offbox_vscan_server scanner_stats_pct_network_usedUnit: noneType: rawBase: conf/zapiperf/cdot/9.8.0/vscan.yaml"},{"location":"ontap-metrics/#wafl_avg_msg_latency","title":"wafl_avg_msg_latency","text":"

      Average turnaround time for WAFL messages in milliseconds.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl average_msg_latencyUnit: millisecType: averageBase: msg_total conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl avg_wafl_msg_latencyUnit: millisecType: averageBase: wafl_msg_total conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_avg_non_wafl_msg_latency","title":"wafl_avg_non_wafl_msg_latency","text":"

      Average turnaround time for non-WAFL messages in milliseconds.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl average_non_wafl_msg_latencyUnit: millisecType: averageBase: non_wafl_msg_total conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl avg_non_wafl_msg_latencyUnit: millisecType: averageBase: non_wafl_msg_total conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_avg_repl_msg_latency","title":"wafl_avg_repl_msg_latency","text":"

      Average turnaround time for replication WAFL messages in milliseconds.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl average_replication_msg_latencyUnit: millisecType: averageBase: replication_msg_total conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl avg_wafl_repl_msg_latencyUnit: millisecType: averageBase: wafl_repl_msg_total conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_cp_count","title":"wafl_cp_count","text":"

      Array of counts of different types of Consistency Points (CP).

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl cp_countUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl cp_countUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_cp_phase_times","title":"wafl_cp_phase_times","text":"

      Array of percentage time spent in different phases of Consistency Point (CP).

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl cp_phase_timesUnit: percentType: percentBase: total_cp_msecs conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl cp_phase_timesUnit: percentType: percentBase: total_cp_msecs conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_memory_free","title":"wafl_memory_free","text":"

      The current WAFL memory available in the system.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl memory_freeUnit: mbType: rawBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_memory_freeUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_memory_used","title":"wafl_memory_used","text":"

      The current WAFL memory used in the system.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl memory_usedUnit: mbType: rawBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_memory_usedUnit: mbType: rawBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_msg_total","title":"wafl_msg_total","text":"

      Total number of WAFL messages per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl msg_totalUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_msg_totalUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_non_wafl_msg_total","title":"wafl_non_wafl_msg_total","text":"

      Total number of non-WAFL messages per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl non_wafl_msg_totalUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl non_wafl_msg_totalUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_read_io_type","title":"wafl_read_io_type","text":"

      Percentage of reads served from buffer cache, external cache, or disk.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl read_io_typeUnit: percentType: percentBase: read_io_type_base conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl read_io_typeUnit: percentType: percentBase: read_io_type_base conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_cache","title":"wafl_reads_from_cache","text":"

      WAFL reads from cache.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl reads_from_cacheUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_reads_from_cacheUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_cloud","title":"wafl_reads_from_cloud","text":"

      WAFL reads from cloud storage.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl reads_from_cloudUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_reads_from_cloudUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_cloud_s2c_bin","title":"wafl_reads_from_cloud_s2c_bin","text":"

      WAFL reads from cloud storage via s2c bin.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl reads_from_cloud_s2c_binUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_reads_from_cloud_s2c_binUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_disk","title":"wafl_reads_from_disk","text":"

      WAFL reads from disk.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl reads_from_diskUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_reads_from_diskUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_ext_cache","title":"wafl_reads_from_ext_cache","text":"

      WAFL reads from external cache.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl reads_from_external_cacheUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_reads_from_ext_cacheUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_fc_miss","title":"wafl_reads_from_fc_miss","text":"

      WAFL reads from remote volume for fc_miss.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl reads_from_fc_missUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_reads_from_fc_missUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_pmem","title":"wafl_reads_from_pmem","text":"

      WAFL reads from persistent memory.

      API Endpoint Metric Template ZAPI perf-object-get-instances wafl wafl_reads_from_pmemUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_reads_from_ssd","title":"wafl_reads_from_ssd","text":"

      WAFL reads from SSD.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl reads_from_ssdUnit: noneType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_reads_from_ssdUnit: noneType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_repl_msg_total","title":"wafl_repl_msg_total","text":"

      Total number of replication WAFL messages per second.

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl replication_msg_totalUnit: per_secType: rateBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl wafl_repl_msg_totalUnit: per_secType: rateBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_total_cp_msecs","title":"wafl_total_cp_msecs","text":"

      Milliseconds spent in Consistency Point (CP).

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl total_cp_msecsUnit: millisecType: deltaBase: conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl total_cp_msecsUnit: millisecType: deltaBase: conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"ontap-metrics/#wafl_total_cp_util","title":"wafl_total_cp_util","text":"

      Percentage of time spent in a Consistency Point (CP).

      API Endpoint Metric Template REST api/cluster/counter/tables/wafl total_cp_utilUnit: percentType: percentBase: cpu_elapsed_time conf/restperf/9.12.0/wafl.yaml ZAPI perf-object-get-instances wafl total_cp_utilUnit: percentType: percentBase: cpu_elapsed_time conf/zapiperf/cdot/9.8.0/wafl.yaml"},{"location":"plugins/","title":"Plugins","text":""},{"location":"plugins/#built-in-plugins","title":"Built-in Plugins","text":"

      The plugin feature allows users to manipulate and customize data collected by collectors without changing the collectors. Plugins have the same capabilities as collectors and therefore can collect data on their own as well. Furthermore, multiple plugins can be put in a pipeline to perform more complex operations.

      Harvest architecture defines two types of plugins:

      Built-in generic - Statically compiled, generic plugins. \"Generic\" means the plugin is collector-agnostic. These plugins are provided in this package and listed in the right sidebar.

      Built-in custom - These plugins are statically compiled, collector-specific plugins. Their source code should reside inside the plugins/ subdirectory of the collector package (e.g. [cmd/collectors/rest/plugins/svm/svm.go](https://github.com/NetApp/harvest/blob/main/cmd/collectors/rest/plugins/svm/svm.go)). Custom plugins have access to all the parameters of their parent collector and should therefore be treated with great care.

      This documentation gives an overview of built-in plugins. For other plugins, see their respective documentation. For writing your own plugin, see the Developer's documentation.

      Note: the rules are executed in the same order as you've added them.
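
      For example, two of the built-in plugins described below can be chained in one template. The following is only an illustrative sketch (the source object and the format of its node label are assumptions): the aggr label produced by LabelAgent's split rule is then used by the Aggregator that follows it.

      plugins:\n  LabelAgent:\n    # first, derive an \"aggr\" label by splitting the \"node\" label\n    split:\n      - node `/` ,aggr,plex,disk\n  Aggregator:\n    # then aggregate the object's metrics per the newly created \"aggr\" label\n    - aggr\n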

      "},{"location":"plugins/#aggregator","title":"Aggregator","text":"

      Aggregator creates a new collection of metrics (Matrix) by summarizing and/or averaging metric values from an existing Matrix for a given label. For example, if the collected metrics are for volumes, you can create an aggregation for nodes or svms.

      "},{"location":"plugins/#rule-syntax","title":"Rule syntax","text":"

      simplest case:

      plugins:\n  Aggregator:\n    - LABEL\n# will aggregate a new Matrix based on target label LABEL\n

      If you want to specify which labels should be included in the new instances, you can add those space-separated after LABEL:

          - LABEL LABEL1,LABEL2\n    # same, but LABEL1 and LABEL2 will be copied into the new instances\n    # (default is to only copy LABEL and any global labels, such as cluster and datacenter)\n

      Or include all labels:

          - LABEL ...\n    # copy all labels of the original instance\n

      By default, aggregated metrics will be prefixed with LABEL. For example, if the object of the original Matrix is volume (meaning metrics are prefixed with volume_) and LABEL is aggr, then the metric volume_read_ops will become aggr_volume_read_ops, etc. You can override this with the <>OBJ syntax shown below:

          - LABEL<>OBJ\n    # use OBJ as the object of the new matrix, e.g. if the original object is \"volume\" and you\n    # want to leave metric names unchanged, use \"volume\"\n

      Finally, sometimes you only want to aggregate instances with a specific label value. You can use <VALUE> for that (optionally followed by OBJ):

          - LABEL<VALUE>\n    # aggregate all instances if LABEL has value VALUE\n    - LABEL<`VALUE`>\n    # same, but VALUE is regular expression\n    - LABEL<LABELX=`VALUE`>\n    # same, but check against \"LABELX\" (instead of \"LABEL\")\n

      Examples:

      plugins:\n  Aggregator:\n    # will aggregate metrics of the aggregate. The labels \"node\" and \"type\" are included in the new instances\n    - aggr node type\n    # aggregate instances if label \"type\" has value \"flexgroup\"\n    # include all original labels\n    - type<flexgroup> ...\n    # aggregate all instances if value of \"volume\" ends with underscore and 4 digits\n    - volume<`_\\d{4}$`>\n
      "},{"location":"plugins/#aggregation-rules","title":"Aggregation rules","text":"

      The plugin tries to intelligently aggregate metrics based on a few rules:

      • Sum - the default rule, if no other rules apply
      • Average - if any of the following is true:
        • metric name has suffix _percent or _percentage
        • metric name has prefix average_ or avg_
        • metric has property (metric.GetProperty()) percent or average
      • Weighted Average - applied if the metric has property average and suffix _latency and there is a matching _ops metric. (Currently this only applies to ZapiPerf metrics, which use the Property field of metrics.)
      • Ignore - metrics created by some plugins, such as value_to_num by LabelAgent
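
      As a worked illustration of these rules, here is a sketch (the metric names are assumptions, reusing the volume example from above) of how a node-level aggregation would treat a few volume metrics:

      plugins:\n  Aggregator:\n    - node\n# assuming the original object is \"volume\":\n# volume_read_ops           -> node_volume_read_ops           (Sum, the default rule)\n# volume_inode_used_percent -> node_volume_inode_used_percent (Average, because of the _percent suffix)\n# volume_read_latency       -> node_volume_read_latency       (Weighted Average, weighted by the matching volume_read_ops)\n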
      "},{"location":"plugins/#max","title":"Max","text":"

      Max creates a new collection of metrics (Matrix) by calculating the maximum of metric values from an existing Matrix for a given label. For example, if the collected metrics are for disks, you can calculate the max at the node or aggregate level. Refer to Max Examples for more details.

      "},{"location":"plugins/#max-rule-syntax","title":"Max Rule syntax","text":"

      simplest case:

      plugins:\n  Max:\n    - LABEL\n# create a new Matrix of max values on target label LABEL\n

      If you want to specify which labels should be included in the new instances, you can add those space-separated after LABEL:

          - LABEL LABEL1,LABEL2\n    # similar to the above example, but LABEL1 and LABEL2 will be copied into the new instances\n    # (default is to only copy LABEL and all global labels, such as cluster and datacenter)\n

      Or include all labels:

          - LABEL ...\n    # copy all labels of the original instance\n

      By default, metrics will be prefixed with LABEL. For example if the object of the original Matrix is volume (meaning metrics are prefixed with volume_) and LABEL is aggr, then the metric volume_read_ops will become aggr_volume_read_ops. You can override this using the <>OBJ pattern shown below:

          - LABEL<>OBJ\n    # use OBJ as the object of the new matrix, e.g. if the original object is \"volume\" and you\n    # want to leave metric names unchanged, use \"volume\"\n

      Finally, sometimes you only want to generate instances with a specific label value. You can use <VALUE> for that (optionally followed by OBJ):

          - LABEL<VALUE>\n    # aggregate all instances if LABEL has value VALUE\n    - LABEL<`VALUE`>\n    # same, but VALUE is regular expression\n    - LABEL<LABELX=`VALUE`>\n    # same, but check against \"LABELX\" (instead of \"LABEL\")\n
      "},{"location":"plugins/#max-examples","title":"Max Examples","text":"
      plugins:\n  Max:\n    # will create max of each aggregate metric. All metrics will be prefixed with aggr_disk_max. All labels are included in the new instances\n    - aggr<>aggr_disk_max ...\n    # calculate max instances if label \"disk\" has value \"1.1.0\". Prefix with disk_max\n    # include all original labels\n    - disk<1.1.0>disk_max ...\n    # max of all instances if value of \"volume\" ends with underscore and 4 digits\n    - volume<`_\\d{4}$`>\n
      "},{"location":"plugins/#labelagent","title":"LabelAgent","text":"

      LabelAgent is used to manipulate instance labels based on rules. You can define multiple rules; here is an example of what you could add to the yaml file of a collector:

      plugins:\n  LabelAgent:\n    # our rules:\n    split: node `/` ,aggr,plex,disk\n    replace_regex: node node `^(node)_(\\d+)_.*$` `Node-$2`\n

      Note: Rules that create a new label should use the name defined on the right side of the => rename operator; if there is no rename, the name on the left side of => is used.

      "},{"location":"plugins/#split","title":"split","text":"

      Rule syntax:

      split:\n  - LABEL `SEP` LABEL1,LABEL2,LABEL3\n# source label - separator - comma-separated target labels\n

      Splits the value of a given label by separator SEP and creates new labels if the number of resulting values matches the number of target labels defined in the rule. To discard a subvalue, just add a redundant , in the list of target labels.

      Example:

      split:\n  - node `/` ,aggr,plex,disk\n# will split the value of \"node\" using separator \"/\"\n# will expect 4 values: first will be discarded, remaining\n# three will be stored as labels \"aggr\", \"plex\" and \"disk\"\n
      "},{"location":"plugins/#split_regex","title":"split_regex","text":"

      Does the same as split but uses a regular expression instead of a separator.

      Rule syntax:

      split_regex:\n  - LABEL `REGEX` LABEL1,LABEL2,LABEL3\n

      Example:

      split_regex:\n  - node `.*_(ag\\d+)_(p\\d+)_(d\\d+)` aggr,plex,disk\n# will look for \"_ag\", \"_p\", \"_d\", each followed by one\n# or more numbers, if there is a match, the submatches\n# will be stored as \"aggr\", \"plex\" and \"disk\"\n
      "},{"location":"plugins/#split_pairs","title":"split_pairs","text":"

      Rule syntax:

      split_pairs:\n  - LABEL `SEP1` `SEP2`\n# source label - pair separator - key-value separator\n

      Extracts key-value pairs from the value of source label LABEL. Note that you need to add these keys in the export options, otherwise they will not be exported.

      Example:

      split_pairs:\n  - comment ` ` `:`\n# will split pairs using a single space and split key-values using colon\n# e.g. if comment=\"owner:jack contact:some@email\", the result will be\n# two new labels: owner=\"jack\" and contact=\"some@email\"\n
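
      As noted above, the keys created by split_pairs must also be listed in the template's export options or they will not be exported. A minimal sketch, assuming the owner and contact keys from the example above should be exported as instance labels (the surrounding export_options section is shown only for illustration):

      export_options:\n  instance_labels:\n    - owner\n    - contact\n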
      "},{"location":"plugins/#join","title":"join","text":"

      Join multiple label values using separator SEP and create a new label.

      Rule syntax:

      join:\n  - LABEL `SEP` LABEL1,LABEL2,LABEL3\n# target label - separator - comma-separated source labels\n

      Example:

      join:\n  - plex_long `_` aggr,plex\n# will look for the values of labels \"aggr\" and \"plex\",\n# if they are set, a new \"plex_long\" label will be added\n# by joining their values with \"_\"\n
      "},{"location":"plugins/#replace","title":"replace","text":"

      Substitute substring OLD with NEW in label SOURCE and store in TARGET. Note that target and source labels can be the same.

      Rule syntax:

      replace:\n  - SOURCE TARGET `OLD` `NEW`\n# source label - target label - substring to replace - replace with\n

      Example:

      replace:\n  - node node_short `node_` ``\n# this rule will just remove \"node_\" from all values of label\n# \"node\". E.g. if label is \"node_jamaica1\", it will rewrite it \n# as \"jamaica1\"\n
      "},{"location":"plugins/#replace_regex","title":"replace_regex","text":"

      Same as replace, but will use a regular expression instead of OLD. Note you can use $n to specify nth submatch in NEW.

      Rule syntax:

      replace_regex:\n  - SOURCE TARGET `REGEX` `NEW`\n# source label - target label - substring to replace - replace with\n

      Example:

      replace_regex:\n  - node node `^(node)_(\\d+)_.*$` `Node-$2`\n# if there is a match, will capitalize \"Node\" and remove suffixes.\n# E.g. if label is \"node_10_dc2\", it will rewrite it\n# as \"Node-10\"\n
      "},{"location":"plugins/#exclude_equals","title":"exclude_equals","text":"

      Exclude each instance if the value of LABEL is exactly VALUE. Exclude means that metrics for this instance will not be exported.

      Rule syntax:

      exclude_equals:\n  - LABEL `VALUE`\n# label name - label value\n

      Example:

      exclude_equals:\n  - vol_type `flexgroup_constituent`\n# all instances, which have label \"vol_type\" with value\n# \"flexgroup_constituent\" will not be exported\n
      "},{"location":"plugins/#exclude_contains","title":"exclude_contains","text":"

      Same as exclude_equals, but instances whose label value contains VALUE will be excluded.

      Rule syntax:

      exclude_contains:\n  - LABEL `VALUE`\n# label name - label value\n

      Example:

      exclude_contains:\n  - vol_type `flexgroup_`\n# all instances, which have label \"vol_type\" which contain\n# \"flexgroup_\" will not be exported\n
      "},{"location":"plugins/#exclude_regex","title":"exclude_regex","text":"

      Same as exclude_equals, but will use a regular expression and all matching instances will be excluded.

      Rule syntax:

      exclude_regex:\n  - LABEL `REGEX`\n# label name - regular expression\n

      Example:

      exclude_regex:\n  - vol_type `^flex`\n# all instances, which have label \"vol_type\" which starts with\n# \"flex\" will not be exported\n
      "},{"location":"plugins/#include_equals","title":"include_equals","text":"

      Include each instance if the value of LABEL is exactly VALUE. Include means that metrics for this instance will be exported and instances that do not match will not be exported.

      Rule syntax:

      include_equals:\n  - LABEL `VALUE`\n# label name - label value\n

      Example:

      include_equals:\n  - vol_type `flexgroup_constituent`\n# all instances, which have label \"vol_type\" with value\n# \"flexgroup_constituent\" will be exported\n
      "},{"location":"plugins/#include_contains","title":"include_contains","text":"

      Same as include_equals, but instances whose label value contains VALUE will be included.

      Rule syntax:

      include_contains:\n  - LABEL `VALUE`\n# label name - label value\n

      Example:

      include_contains:\n  - vol_type `flexgroup_`\n# all instances, which have label \"vol_type\" which contain\n# \"flexgroup_\" will be exported\n
      "},{"location":"plugins/#include_regex","title":"include_regex","text":"

      Same as include_equals, but a regular expression will be used for inclusion. Similar to the other includes, all matching instances will be included and all non-matching will not be exported.

      Rule syntax:

      include_regex:\n  - LABEL `REGEX`\n# label name - regular expression\n

      Example:

      include_regex:\n  - vol_type `^flex`\n# all instances, which have label \"vol_type\" which starts with\n# \"flex\" will be exported\n
      "},{"location":"plugins/#value_mapping","title":"value_mapping","text":"

      value_mapping was deprecated in 21.11 and removed in 22.02. Use value_to_num mapping instead.

      "},{"location":"plugins/#value_to_num","title":"value_to_num","text":"

      Map values of a given label to a numeric metric (of type uint8). Healthy values are mapped to 1 and all non-healthy values are mapped to 0.

      This is handy to manipulate the data in the DB or Grafana (e.g. change color based on status or create alert).

      Note that you don't define the numeric values yourself; instead, you only provide the possible (expected) values, and the plugin will map each value to its index in the rule.

      Rule syntax:

      value_to_num:\n  - METRIC LABEL ZAPI_VALUE REST_VALUE `N`\n# map values of LABEL to 1 if it is ZAPI_VALUE or REST_VALUE\n# otherwise, value of METRIC is set to N\n

      The default value N is optional; if no default value is given and the label value does not match any of the given values, the metric value will not be set.

      Examples:

      value_to_num:\n  - status state up online `0`\n# a new metric will be created with the name \"status\"\n# if an instance has label \"state\" with value \"up\", the metric value will be 1,\n# if it's \"online\", the value will be set to 1,\n# if it's any other value, it will be set to the specified default, 0\n
      value_to_num:\n  - status state up online `4`\n# metric value will be set to 1 if \"state\" is \"up\", otherwise to **4**\n
      value_to_num:\n  - status outage - - `0` #ok_value is empty value. \n# metric value will be set to 1 if \"outage\" is empty, if it's any other value, it will be set to the default, 0\n# '-' is a special symbol in this mapping, and it will be converted to blank while processing.\n
      "},{"location":"plugins/#value_to_num_regex","title":"value_to_num_regex","text":"

      Same as value_to_num, but will use a regular expression. All matches are mapped to 1 and non-matches are mapped to 0.

      This is handy to manipulate the data in the DB or Grafana (e.g. change color based on status or create alert).

      Note that you don't define the numeric values; instead, you provide the expected values and the plugin will map each value to its index in the rule.

      Rule syntax:

      value_to_num_regex:\n  - METRIC LABEL ZAPI_REGEX REST_REGEX `N`\n# map values of LABEL to 1 if it matches ZAPI_REGEX or REST_REGEX\n# otherwise, value of METRIC is set to N\n

      The default value N is optional; if no default value is given and the label value does not match any of the given values, the metric value will not be set.

      Examples:

      value_to_num_regex:\n  - certificateuser methods .*cert.*$ .*certificate.*$ `0`\n# a new metric will be created with the name \"certificateuser\"\n# if an instance has label \"methods\" with value contains \"cert\", the metric value will be 1,\n# if value contains \"certificate\", the value will be set to 1,\n# if value doesn't contain \"cert\" and \"certificate\", it will be set to the specified default, 0\n
      value_to_num_regex:\n  - status state ^up$ ^ok$ `4`\n# metric value will be set to 1 if label \"state\" matches regex, otherwise set to **4**\n
      "},{"location":"plugins/#metricagent","title":"MetricAgent","text":"

      MetricAgent is used to manipulate metrics based on rules. You can define multiple rules; here is an example of what you could add to the yaml file of a collector:

      plugins:\n  MetricAgent:\n    compute_metric:\n      - snapshot_maxfiles_possible ADD snapshot.max_files_available snapshot.max_files_used\n      - raid_disk_count ADD block_storage.primary.disk_count block_storage.hybrid_cache.disk_count\n

      Note: Metric names used to create new metrics can come from the left or right side of the rename operator (=>). Note: The metric agent currently does not work for histogram or array metrics.

      "},{"location":"plugins/#compute_metric","title":"compute_metric","text":"

      This rule creates a new metric (of type float64) using the provided scalar or an existing metric value combined with a mathematical operation.

      You can provide a numeric value or a metric name with an operation. The plugin will use the provided number or fetch the value of a given metric, perform the requested mathematical operation, and store the result in a new custom metric.

      Currently, we support these operations: ADD SUBTRACT MULTIPLY DIVIDE PERCENT

      Rule syntax:

      compute_metric:\n  - METRIC OPERATION METRIC1 METRIC2 METRIC3\n# target new metric - mathematical operation - input metric names \n# apply OPERATION on metric values of METRIC1, METRIC2 and METRIC3 and set result in METRIC\n# METRIC1, METRIC2, METRIC3 can be a scalar or an existing metric name.\n

      Examples:

      compute_metric:\n  - space_total ADD space_available space_used\n# a new metric will be created with the name \"space_total\"\n# if an instance has metric \"space_available\" with value \"1000\", and \"space_used\" with value \"400\",\n# the result value will be \"1400\" and set to metric \"space_total\".\n
      compute_metric:\n  - disk_count ADD primary.disk_count secondary.disk_count hybrid.disk_count\n# value of metric \"disk_count\" would be addition of all the given disk_counts metric values.\n# disk_count = primary.disk_count + secondary.disk_count + hybrid.disk_count\n
      compute_metric:\n  - files_available SUBTRACT files files_used\n# value of metric \"files_available\" would be subtraction of the metric value of files_used from metric value of files.\n# files_available = files - files_used\n
      compute_metric:\n  - total_bytes MULTIPLY bytes_per_sector sector_count\n# value of metric \"total_bytes\" would be multiplication of metric value of bytes_per_sector and metric value of sector_count.\n# total_bytes = bytes_per_sector * sector_count\n
      compute_metric:\n  - uptime MULTIPLY stats.power_on_hours 60 60\n# value of metric \"uptime\" would be multiplication of metric value of stats.power_on_hours and scalar value of 60 * 60.\n# uptime = stats.power_on_hours * 60 * 60\n
      compute_metric:\n  - transmission_rate DIVIDE transfer.bytes_transferred transfer.total_duration\n# value of metric \"transmission_rate\" would be division of metric value of transfer.bytes_transferred by metric value of transfer.total_duration.\n# transmission_rate = transfer.bytes_transferred / transfer.total_duration\n
      compute_metric:\n  - inode_used_percent PERCENT inode_files_used inode_files_total\n# a new metric named \"inode_used_percent\" will be created by dividing the metric \"inode_files_used\" by \n#  \"inode_files_total\" and multiplying the result by 100.\n# inode_used_percent = inode_files_used / inode_files_total * 100\n
      "},{"location":"plugins/#changelog","title":"ChangeLog","text":"

      The ChangeLog plugin is a feature of Harvest, designed to detect and track changes related to the creation, modification, and deletion of an object. By default, it supports volume, svm, and node objects. Its functionality can be extended to track changes in other objects by making relevant changes in the template.

      Please note that the ChangeLog plugin requires the uuid label, which is unique, to be collected by the template. Without the uuid label, the plugin will not function.

      The ChangeLog feature only detects changes when Harvest is up and running. It does not detect changes that occur when Harvest is down. Additionally, the plugin does not detect changes in metric values by default, but it can be configured to do so.

      "},{"location":"plugins/#enabling-the-plugin","title":"Enabling the Plugin","text":"

      The plugin can be enabled in the templates under the plugins section.

      For volume, svm, and node objects, you can enable the plugin with the following configuration:

      plugins:\n  - ChangeLog\n

      For other objects, you need to specify the labels to track in the plugin configuration. These labels should be relevant to the object you want to track. If these labels are not specified in the template, the plugin will not be able to track changes for the object.

      Here's an example of how to enable the plugin for an aggregate object:

      plugins:\n  - ChangeLog:\n      track:\n        - aggr\n        - node\n        - state\n

      In the above configuration, the plugin will track changes in the aggr, node, and state labels for the aggregate object.

      "},{"location":"plugins/#default-tracking-for-svm-node-volume","title":"Default Tracking for svm, node, volume","text":"

      By default, the plugin tracks changes in the following labels for svm, node, and volume objects:

      • svm: svm, state, type, anti_ransomware_state
      • node: node, location, healthy
      • volume: node, volume, svm, style, type, aggr, state, status

      Other objects are not tracked by default.

      These default settings can be overwritten as needed in the relevant templates. For instance, if you want to track junction_path label and size_total metric for Volume, you can overwrite this in the volume template.

      plugins:\n  - ChangeLog:\n      - track:\n        - node\n        - volume\n        - svm\n        - style\n        - type\n        - aggr\n        - state\n        - status\n        - junction_path\n        - size_total\n
      "},{"location":"plugins/#change-types-and-metrics","title":"Change Types and Metrics","text":"

      The ChangeLog plugin publishes a metric with various labels providing detailed information about the change when an object is created, modified, or deleted.

      "},{"location":"plugins/#object-creation","title":"Object Creation","text":"

      When a new object is created, the ChangeLog plugin will publish a metric with the following labels:

      • object: name of the ONTAP object that was changed
      • op: type of change that was made
      • metric value: timestamp when Harvest captured the change (1698735558 in the example below)

      Example of metric shape for object creation:

      change_log{aggr=\"umeng_aff300_aggr2\", cluster=\"umeng-aff300-01-02\", datacenter=\"u2\", index=\"0\", instance=\"localhost:12993\", job=\"prometheus\", node=\"umeng-aff300-01\", object=\"volume\", op=\"create\", style=\"flexvol\", svm=\"harvest\", volume=\"harvest_demo\"} 1698735558\n
      "},{"location":"plugins/#object-modification","title":"Object Modification","text":"

      When an existing object is modified, the ChangeLog plugin will publish a metric with the following labels:

      • object: Name of the ONTAP object that was changed
      • op: Type of change that was made
      • track: Property of the object which was modified
      • new_value: New value of the object after the change (only available for label changes and not for metric changes)
      • old_value: Previous value of the object before the change (only available for label changes and not for metric changes)
      • metric value: Timestamp when Harvest captured the change (1698735677 in the example below)
      • category: Type of the change, indicating whether it is a metric or a label change

      Example of metric shape for object modification for label:

      change_log{aggr=\"umeng_aff300_aggr2\", category=\"label\", cluster=\"umeng-aff300-01-02\", datacenter=\"u2\", index=\"1\", instance=\"localhost:12993\", job=\"prometheus\", new_value=\"offline\", node=\"umeng-aff300-01\", object=\"volume\", old_value=\"online\", op=\"update\", style=\"flexvol\", svm=\"harvest\", track=\"state\", volume=\"harvest_demo\"} 1698735677\n

      Example of metric shape for metric value change:

      change_log{aggr=\"umeng_aff300_aggr2\", category=\"metric\", cluster=\"umeng-aff300-01-02\", datacenter=\"u2\", index=\"3\", instance=\"localhost:12993\", job=\"prometheus\", node=\"umeng-aff300-01\", object=\"volume\", op=\"metric_change\", track=\"volume_size_total\", svm=\"harvest\", volume=\"harvest_demo\"} 1698735800\n
      "},{"location":"plugins/#object-deletion","title":"Object Deletion","text":"

      When an object is deleted, the ChangeLog plugin will publish a metric with the following labels:

      • object: name of the ONTAP object that was changed
      • op: type of change that was made
      • metric value: timestamp when Harvest captured the change (1698735708 in the example below)

      Example of metric shape for object deletion:

      change_log{aggr=\"umeng_aff300_aggr2\", cluster=\"umeng-aff300-01-02\", datacenter=\"u2\", index=\"2\", instance=\"localhost:12993\", job=\"prometheus\", node=\"umeng-aff300-01\", object=\"volume\", op=\"delete\", style=\"flexvol\", svm=\"harvest\", volume=\"harvest_demo\"} 1698735708\n
      "},{"location":"plugins/#viewing-the-metrics","title":"Viewing the Metrics","text":"

      You can view the metrics published by the ChangeLog plugin in the ChangeLog Monitor dashboard in Grafana. This dashboard provides a visual representation of the changes tracked by the plugin for volume, svm, and node objects.

      "},{"location":"plugins/#volumetopclients","title":"VolumeTopClients","text":"

      The VolumeTopClients plugin tracks the top clients of volumes in terms of read and write IOPS, as well as read and write throughput. This plugin is available only through the RestPerf Collector in ONTAP version 9.12 and later.

      "},{"location":"plugins/#enabling-the-plugin_1","title":"Enabling the Plugin","text":"

      Top Clients collection is disabled by default. To enable Top Clients tracking in Harvest, follow these steps:

      1. Ensure you are using ONTAP version 9.12 or later.
      2. Enable the Top Clients collection in the RestPerf Collector Volume template via the VolumeTopClients plugin.

      For detailed steps on how to enable the plugin, refer to the discussion here.

      "},{"location":"plugins/#configuration-parameters","title":"Configuration Parameters","text":""},{"location":"plugins/#max_volumes","title":"max_volumes","text":"

      The max_volumes parameter specifies the maximum number of top volumes to track. By default, this value is set to 5, but it can be configured up to a maximum of 50.

      The plugin will select the top volumes based on the descending order of read IOPS, write IOPS, read throughput, and write throughput in each performance poll. This means that during each performance poll, the plugin will:

      1. Collect the read IOPS, write IOPS, read throughput, and write throughput for all volumes.
      2. Sort the volumes in descending order based on their metric values.
      3. Select the top volumes as specified by max_volumes.
      4. Collect top clients metrics for these volumes.
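
      For illustration, a sketch of how this could look in the RestPerf volume template is shown below; the exact placement of the max_volumes parameter under the plugin is an assumption, so check the template shipped with your Harvest version:

      plugins:\n  - VolumeTopClients:\n      max_volumes: 10\n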
      "},{"location":"plugins/#viewing-the-metrics_1","title":"Viewing the Metrics","text":"

      You can view the metrics published by the VolumeTopClients plugin in the Volume dashboard under the Top Clients row in Grafana.

      "},{"location":"prepare-7mode-clusters/","title":"ONTAP 7mode","text":"

      NetApp Harvest requires login credentials to access monitored hosts. Although a generic admin account can be used, it is best practice to create a dedicated monitoring account with least-privilege access.

      ONTAP 7-mode supports only username/password based authentication with NetApp Harvest. Harvest communicates with monitored systems exclusively via HTTPS, which is not enabled by default in Data ONTAP 7-mode. Log in as a user with full administrative privileges and execute the following steps.

      "},{"location":"prepare-7mode-clusters/#enabling-https-and-tls-ontap-7-mode-only","title":"Enabling HTTPS and TLS (ONTAP 7-mode only)","text":"

      Verify SSL is configured

      secureadmin status ssl\n

      If SSL is \u2018active\u2019, continue. If not, set up SSL and be sure to choose a Key length (bits) of 2048:

      secureadmin setup ssl\n
      SSL Setup has already been done before. Do you want to proceed? [no] yes\nCountry Name (2 letter code) [US]: NL\nState or Province Name (full name) [California]: Noord-Holland\nLocality Name (city, town, etc.) [Santa Clara]: Schiphol\nOrganization Name (company) [Your Company]: NetApp\nOrganization Unit Name (division): SalesEngineering\nCommon Name (fully qualified domain name) [sdt-7dot1a.nltestlab.hq.netapp.com]:\nAdministrator email: noreply@netapp.com\nDays until expires [5475] :5475 Key length (bits) [512] :2048\n

      Enable management via SSL and enable TLS

      options httpd.admin.ssl.enable on\noptions tls.enable on  \n
      "},{"location":"prepare-7mode-clusters/#creating-ontap-user","title":"Creating ONTAP user","text":""},{"location":"prepare-7mode-clusters/#create-the-role-with-required-capabilities","title":"Create the role with required capabilities","text":"
      role add netapp-harvest-role -c \"Role for performance monitoring by NetApp Harvest\" -a login-http-admin,api-system-get-version,api-system-get-info,api-perf-object-*,api-emsautosupport-log \n
      "},{"location":"prepare-7mode-clusters/#create-a-group-for-this-role","title":"Create a group for this role","text":"
      useradmin group add netapp-harvest-group -c \"Group for performance monitoring by NetApp Harvest\" -r netapp-harvest-role \n
      "},{"location":"prepare-7mode-clusters/#create-a-user-for-the-role-and-enter-the-password-when-prompted","title":"Create a user for the role and enter the password when prompted","text":"
      useradmin user add netapp-harvest -c \"User account for performance monitoring by NetApp Harvest\" -n \"NetApp Harvest\" -g netapp-harvest-group\n

      The user is now created and can be configured for use by NetApp Harvest.

      "},{"location":"prepare-cdot-clusters/","title":"ONTAP cDOT","text":""},{"location":"prepare-cdot-clusters/#prepare-ontap-cdot-cluster","title":"Prepare ONTAP cDOT cluster","text":"

      NetApp Harvest requires login credentials to access monitored hosts. Although a generic admin account can be used, it is better to create a dedicated read-only monitoring account.

      In the examples below, the user, group, roles, etc., use a naming convention of netapp-harvest. These can be modified as needed to match your organizational needs.

      There are a few steps required to prepare each system for monitoring. Harvest supports two authentication styles (auth_style) for connecting to ONTAP clusters: basic_auth and certificate_auth. Both work well, but if you're starting fresh, the recommendation is to create a read-only harvest user on your ONTAP server and use certificate-based TLS authentication.

      Here's a summary of what we're going to do

      1. Create a read-only ONTAP role with the necessary capabilities that Harvest will use to authenticate and collect data
      2. Create a user account using the role created in step #1
      3. Update the harvest.yml file to use the user account and password created in step #2 and start Harvest.
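
      As a sketch of step #3, a poller entry in harvest.yml using basic_auth might look like the following; the poller name, datacenter, address, and collector list are placeholders to adapt to your environment:

      Pollers:\n  cluster-01:\n    datacenter: dc-01\n    addr: 10.0.1.1\n    auth_style: basic_auth\n    username: harvest2\n    password: your-password\n    collectors:\n      - Rest\n      - RestPerf\n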

      There are two ways to create a read-only ONTAP role. Pick the one that best fits your needs.

      • Create a role with read-only access to all API objects via System Manager.
      • Create a role with read-only access to the limited set of APIs Harvest collects via ONTAP's command line interface (CLI).
      "},{"location":"prepare-cdot-clusters/#system-manager","title":"System Manager","text":"

      Open System Manager. Click on CLUSTER in the left menu bar, then Settings and Users and Roles.

      In the right column, under Roles, click on Add to add a new role.

      Choose a role name (e.g. harvest2-role). In the REST API PATH field, type /api and select Read-Only for ACCESS. Click on Save.

      In the left column, under Users, click on Add to create a new user. Choose a username. Under Role, select the role that we just created. Under User Login Methods, select ONTAPI and one of the two authentication methods. Press the Add button and select HTTP and one of the authentication methods. Type in a password if you chose Password. Click on Save.

      If you chose Password, you can add the username and password to the Harvest configuration file and start Harvest. If you chose Certificate, jump to Using Certificate Authentication to generate certificate files.

      System Manager Classic interface

      Open System Manager. Click on the Settings icon in the top-right corner of the window.

      Click on Roles in the left menu bar and click Add. Choose a role name (e.g. harvest2-role).

      Under Role Attributes click on Add, under Command type DEFAULT, leave Query empty, select readonly under Access Level, click on OK and Add.

      After you click on Add, this is what you should see:

      Now we need to create a user. Click on Users in the left menu bar and Add. Choose a username and password. Under User Login Methods click on Add, select ontapi as Application and select the role that we just created as Role. Repeat by clicking on Add, select http as Application and select the role that we just created as Role. Click on Add in the pop-up window to save.

      "},{"location":"prepare-cdot-clusters/#ontap-cli","title":"ONTAP CLI","text":"

      We are going to:

      1. create a Harvest role with read-only access to a limited set of objects
      2. create a Harvest user and assign it to that role

      Log in to the CLI of your cDOT ONTAP system using SSH.

      "},{"location":"prepare-cdot-clusters/#least-privilege-approach","title":"Least-privilege approach","text":"

      Verify there are no errors when you copy/paste these. Warnings are fine.

      security login role create -role harvest2-role -access readonly -cmddirname \"cluster\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"event notification destination show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"event notification destination\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"lun\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"metrocluster configuration-settings mediator add\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"network fcp adapter show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"network interface\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"network port show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"network route show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"qos adaptive-policy-group\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"qos policy-group\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"qos workload show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"security\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"snapmirror\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"statistics\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"storage aggregate\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"storage disk\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"storage encryption disk\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"storage failover show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"storage iscsi-initiator show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"storage shelf\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"system chassis fru show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"system health alert show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"system health status show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"system health subsystem show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"system license show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"system node\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"system service-processor show\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"version\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"volume\"\nsecurity login role create -role harvest2-role -access readonly -cmddirname \"vserver\"\n
      "},{"location":"prepare-cdot-clusters/#create-harvest-user-and-associate-with-the-harvest-role","title":"Create harvest user and associate with the harvest role","text":"

      Use this for password authentication

      # If the harvest2 user does not exist, you will be prompted to enter a password\nsecurity login create -user-or-group-name harvest2 -application ontapi -role harvest2-role -authentication-method password\n

      Or this for certificate authentication

      security login create -user-or-group-name harvest2 -application ontapi -role harvest2-role -authentication-method cert\n
      "},{"location":"prepare-cdot-clusters/#create-rest-role","title":"Create REST role","text":"

      Replace $ADMIN_VSERVER with your SVM admin name.

      security login rest-role create -role harvest2-rest-role -access readonly -api /api -vserver $ADMIN_VSERVER\n
      Least-privilege approach for REST

      If you are on ONTAP version 9.14.X or later, instead of the above command, you can use the following commands to create a REST role with read-only access to a limited set of objects.

      Since REST roles are defined in terms of legacy roles, if you have already created a legacy role with the same name, you will need to delete it first or use a different name.

      security login rest-role create -role harvest-rest-role -access readonly -api /api/cloud/targets\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster/counter/tables\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster/mediators\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster/metrocluster/diagnostics\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster/nodes\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster/ntp/servers\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster/peers\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/cluster/sensors\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/name-services/ldap\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/name-services/nis\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/network/ethernet/ports\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/network/fc/ports\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/network/ip/interfaces\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/network/ip/ports\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/network/ip/routes\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/support/alerts\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/cifs/services\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/cifs/sessions\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/cifs/shares\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/locks\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/ndmp/sessions\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/nfs/connected-clients\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/nfs/export-policies\n# s3 is buggy in 9.15, use protocols endpoint instead. 
See https://mysupport.netapp.com/site/bugs-online/product/ONTAP/JiraNgage/CONTAP-210232\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/protocols\n# security login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/s3/buckets\n# security login rest-role create -role harvest-rest-role -access readonly -api /api/protocols/s3/services\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/security\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/security/accounts\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/security/audit/destinations\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/security/certificates\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/security/login/messages\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/security/ssh\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/snapmirror/relationships\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/aggregates\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/disks\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/flexcache/flexcaches\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/luns\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/namespaces\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/qtrees\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/qos/policies\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/qos/workloads\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/quota/reports\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/shelves\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/storage/volumes\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/support/auto-update\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/support/autosupport\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/support/ems/destinations\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/support/ems/events\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/support/ems/messages\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/svm/peers\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/svm/svms\n\n# Private CLI endpoints\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/aggr\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/cluster/date\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/disk\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/network/interface\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/node\nsecurity login rest-role create 
-role harvest-rest-role -access readonly -api /api/private/cli/qos/adaptive-policy-group\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/qos/policy-group\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/qos/workload\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/snapmirror\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/snapshot/policy\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/storage/failover\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/storage/shelf\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/system/chassis/fru\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/system/health/subsystem\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/volume\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/vserver\nsecurity login rest-role create -role harvest-rest-role -access readonly -api /api/private/cli/vserver/object-store-server/bucket/policy\n

      See #2991 for more information.

      "},{"location":"prepare-cdot-clusters/#associate-rest-role-with-harvest-user","title":"Associate REST role with harvest user","text":"

      Using password authentication

      security login create -user-or-group-name harvest2 -application http -role harvest2-rest-role -authentication-method password\n
      If you get an error command failed: duplicate entry when running the previous command

      Remove the previous entry and recreate like so:

      security login delete -user-or-group-name harvest2 -application http -authentication-method *\nsecurity login create -user-or-group-name harvest2 -application http -role harvest2-rest-role -authentication-method password\n

      Using certificate authentication

      security login create -user-or-group-name harvest2 -application http -role harvest2-rest-role -authentication-method cert\n
      If you get an error command failed: duplicate entry when running the previous command

      Remove the previous entry and recreate like so:

      security login delete -user-or-group-name harvest2 -application http -authentication-method *\nsecurity login create -user-or-group-name harvest2 -application http -role harvest2-rest-role -authentication-method cert\n
      "},{"location":"prepare-cdot-clusters/#verify-that-the-harvest-role-has-web-access","title":"Verify that the harvest role has web access","text":"
      vserver services web access show -role harvest2-role -name ontapi\nvserver services web access show -role harvest2-rest-role -name rest\nvserver services web access show -role harvest2-rest-role -name docs-api\n

      If any entries are missing, enable access by running the following. Replace $ADMIN_VSERVER with your SVM admin name.

      vserver services web access create -vserver $ADMIN_VSERVER -name ontapi -role harvest2-role\nvserver services web access create -vserver $ADMIN_VSERVER -name rest -role harvest2-rest-role\nvserver services web access create -vserver $ADMIN_VSERVER -name docs-api -role harvest2-rest-role\n

      "},{"location":"prepare-cdot-clusters/#7-mode-cli","title":"7-Mode CLI","text":"

      Log in to the CLI of your 7-Mode ONTAP system (e.g. using SSH). First, we create a user role. If you want to give the user read-only access to all API objects, type in the following command:

      useradmin role modify harvest2-role -a login-http-admin,api-system-get-version, \\\napi-system-get-info,api-perf-object-*,api-ems-autosupport-log,api-diagnosis-status-get, \\\napi-lun-list-info,api-diagnosis-subsystem-config-get-iter,api-disk-list-info, \\\napi-diagnosis-config-get-iter,api-aggr-list-info,api-volume-list-info, \\\napi-storage-shelf-environment-list-info,api-qtree-list,api-quota-report\n
      "},{"location":"prepare-cdot-clusters/#using-certificate-authentication","title":"Using Certificate Authentication","text":"

      See comments here for troubleshooting client certificate authentication.

      Client certificate authentication allows you to authenticate with your ONTAP cluster without including usernames or passwords in your harvest.yml file. The process to set up client certificates is straightforward, although self-signed certificates introduce more work, as does Go's strict treatment of common names.

      Unless you've installed production certificates on your ONTAP cluster, you'll need to replace your cluster's common-name-based self-signed certificates with a subject alternative name-based certificate. After that step is completed, we'll create client certificates and add those for passwordless login.

      If you can't or don't want to replace your ONTAP cluster certificates, there are some workarounds. You can

      • Use use_insecure_tls: true in your harvest.yml to disable certificate verification
      • Change your harvest.yml to connect via hostname instead of IP address
      "},{"location":"prepare-cdot-clusters/#create-self-signed-subject-alternate-name-certificates-for-ontap","title":"Create Self-Signed Subject Alternate Name Certificates for ONTAP","text":"

      Subject alternate name (SAN) certificates allow multiple hostnames in a single certificate. Starting with Go 1.3, when connecting to a cluster via its IP address, the CN field in the server certificate is ignored. This often causes errors like this: x509: cannot validate certificate for 127.0.0.1 because it doesn't contain any IP SANs

      "},{"location":"prepare-cdot-clusters/#overview-of-steps-to-create-a-self-signed-san-certificate-and-make-ontap-use-it","title":"Overview of steps to create a self-signed SAN certificate and make ONTAP use it","text":"
      1. Create a root key
      2. Create a root certificate authority certificate
      3. Create a SAN certificate for your ONTAP cluster, using #2 to create it
      4. Install root ca certificate created in step #2 on cluster
      5. Install SAN certificate created in step #3 on your cluster
      6. Modify your cluster/SVM to use the new certificate installed at step #5
      "},{"location":"prepare-cdot-clusters/#setup","title":"Setup","text":"
      # create a place to store the certificate authority files, adjust as needed\nmkdir -p ca/{private,certs}\n
      "},{"location":"prepare-cdot-clusters/#create-a-root-key","title":"Create a root key","text":"
      cd ca\n# generate a private key that we will use to create our self-signed certificate authority\nopenssl genrsa -out private/ca.key.pem 4096\nchmod 400 private/ca.key.pem\n
      "},{"location":"prepare-cdot-clusters/#create-a-root-certificate-authority-certificate","title":"Create a root certificate authority certificate","text":"

      Download the sample openssl.cnf file and put it in the directory we created in setup. Edit line 9, changing dir to point to your ca directory created in setup.
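If you prefer not to edit the file by hand, a one-liner like the following updates the dir value; the /opt/harvest/ca path is only an assumption, so substitute the ca directory you created in setup (GNU sed shown).

# point dir at your ca directory, then confirm the change\nsed -i 's|^dir *=.*|dir = /opt/harvest/ca|' openssl.cnf\ngrep '^dir' openssl.cnf\n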

      openssl req -config openssl.cnf -key private/ca.key.pem -new -x509 -days 7300 -sha256 -extensions v3_ca -out certs/ca.cert.pem\n\n# Verify\nopenssl x509 -noout -text -in certs/ca.cert.pem\n\n# Make sure these are present\n    Signature Algorithm: sha256WithRSAEncryption               <======== Signature Algorithm can not be sha-1\n        X509v3 extensions:\n            X509v3 Subject Key Identifier: \n                --removed\n            X509v3 Authority Key Identifier: \n                --removed\n\n            X509v3 Basic Constraints: critical\n                CA:TRUE                                        <======== CA must be true\n            X509v3 Key Usage: critical\n                Digital Signature, Certificate Sign, CRL Sign  <======== Digital and certificate signature\n
      "},{"location":"prepare-cdot-clusters/#create-a-san-certificate-for-your-ontap-cluster","title":"Create a SAN certificate for your ONTAP cluster","text":"

First, we'll create the certificate signing request and then the certificate. In this example, the ONTAP cluster is named umeng-aff300-05-06; update the names accordingly.

      Download the sample server_cert.cnf file and put it in the directory we created in setup. Edit lines 18-21 to include your ONTAP cluster hostnames and IP addresses. Edit lines 6-11 with new names as needed.

      openssl req -new -newkey rsa:4096 -nodes -sha256 -subj \"/\" -config server_cert.cnf -outform pem -out umeng-aff300-05-06.csr -keyout umeng-aff300-05-06.key\n\n# Verify\nopenssl req -text -noout -in umeng-aff300-05-06.csr\n\n# Make sure these are present\n        Attributes:\n        Requested Extensions:\n            X509v3 Subject Alternative Name:         <======== Section that lists alternate DNS and IP names\n                DNS:umeng-aff300-05-06-cm.rtp.openenglab.netapp.com, DNS:umeng-aff300-05-06, IP Address:10.193.48.11, IP Address:10.193.48.11\n    Signature Algorithm: sha256WithRSAEncryption     <======== Signature Algorithm can not be sha-1\n

      We'll now use the certificate signing request and the recently created certificate authority to create a new SAN certificate for our cluster.

      openssl x509 -req -sha256 -days 30 -in umeng-aff300-05-06.csr -CA certs/ca.cert.pem -CAkey private/ca.key.pem -CAcreateserial -out umeng-aff300-05-06.crt -extensions req_ext -extfile server_cert.cnf\n\n# Verify\nopenssl x509 -text -noout -in umeng-aff300-05-06.crt\n\n# Make sure these are present\nX509v3 extensions:\n            X509v3 Subject Alternative Name:       <======== Section that lists alternate DNS and IP names\n                DNS:umeng-aff300-05-06-cm.rtp.openenglab.netapp.com, DNS:umeng-aff300-05-06, IP Address:10.193.48.11, IP Address:10.193.48.11\n    Signature Algorithm: sha256WithRSAEncryption   <======== Signature Algorithm can not be sha-1\n
      "},{"location":"prepare-cdot-clusters/#install-root-ca-certificate-on-cluster","title":"Install Root CA Certificate On Cluster","text":"

Log in to your cluster with admin credentials and install the server certificate authority. Copy it from ca/certs/ca.cert.pem

      ssh admin@IP\numeng-aff300-05-06::*> security certificate install -type server-ca\n\nPlease enter Certificate: Press <Enter> when done\n-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n\nYou should keep a copy of the CA-signed digital certificate for future reference.\n\nThe installed certificate's CA and serial number for reference:\nCA: ntap\nSerial: 46AFFC7A3A9999999E8FB2FEB0\n\nThe certificate's generated name for reference: ntap\n

Now install the SAN server certificate we created above. Copy the certificate from ca/umeng-aff300-05-06.crt and the private key from ca/umeng-aff300-05-06.key

      umeng-aff300-05-06::*> security certificate install -type server\n\nPlease enter Certificate: Press <Enter> when done\n-----BEGIN CERTIFICATE-----\n..\n-----END CERTIFICATE-----\n\nPlease enter Private Key: Press <Enter> when done\n-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n\nPlease enter certificates of Certification Authorities (CA) which form the certificate chain of the server certificate. This starts with the issuing CA certificate of the server certificate and can range up to the root CA certificate.\n\nDo you want to continue entering root and/or intermediate certificates {y|n}: n\n

      If ONTAP tells you the provided certificate does not have a common name in the subject field, type the hostname of the cluster like this:

      The provided certificate does not have a common name in the subject field.\n\nEnter a valid common name to continue installation of the certificate:\n\nEnter a valid common name to continue installation of the certificate: umeng-aff300-05-06-cm.rtp.openenglab.netapp.com\n\nYou should keep a copy of the private key and the CA-signed digital certificate for future reference.\n\nThe installed certificate's CA and serial number for reference:\nCA: ntap\nSerial: 67A94AA25B229A68AC5BABACA8939A835AA998A58\n\nThe certificate's generated name for reference: umeng-aff300-05-06-cm.rtp.openenglab.netapp.com\n
      "},{"location":"prepare-cdot-clusters/#modify-the-admin-svm-to-use-the-new-certificate","title":"Modify the admin SVM to use the new certificate","text":"

We'll modify the cluster's admin SVM to use the server certificate and certificate authority we just installed.

      vserver show -type admin -fields vserver,type\nvserver            type\n------------------ -----\numeng-aff300-05-06 admin\n\numeng-aff300-05-06::*> ssl modify -vserver umeng-aff300-05-06 -server-enabled true -serial 67A94AA25B229A68AC5BABACA8939A835AA998A58 -ca ntap\n  (security ssl modify)\n

      You can verify the certificate(s) are installed and working by using openssl like so:

      openssl s_client -CAfile certs/ca.cert.pem -showcerts -servername server -connect umeng-aff300-05-06-cm.rtp.openenglab.netapp.com:443\n\nCONNECTED(00000005)\ndepth=1 C = US, ST = NC, L = RTP, O = ntap, OU = ntap\nverify return:1\ndepth=0 \nverify return:1\n...\n

Without the -CAfile option, openssl will report:

      CONNECTED(00000005)\ndepth=0 \nverify error:num=20:unable to get local issuer certificate\nverify return:1\ndepth=0 \nverify error:num=21:unable to verify the first certificate\nverify return:1\n---\n
      "},{"location":"prepare-cdot-clusters/#create-client-certificates-for-password-less-login","title":"Create Client Certificates for Password-less Login","text":"

      Copy the server certificate we created above into the Harvest install directory.

      cp ca/umeng-aff300-05-06.crt /opt/harvest\ncd /opt/harvest\n

Create a self-signed client key and certificate with the same name as the hostname where Harvest is running. It's not required to name the key/cert pair after the hostname, but if you do, Harvest will load them automatically when you specify auth_style: certificate_auth; otherwise, you can point to them directly. See Pollers for details.

Change the common name to the ONTAP user you set up with the harvest role above, e.g., harvest2.

      cd /opt/harvest\nmkdir cert\nopenssl req -x509 -nodes -days 1095 -newkey rsa:2048 -keyout cert/$(hostname).key -out cert/$(hostname).pem -subj \"/CN=harvest2\"\n
      "},{"location":"prepare-cdot-clusters/#install-client-certificates-on-cluster","title":"Install Client Certificates on Cluster","text":"

Log in to your cluster with admin credentials and install the client certificate. Copy it from cert/$(hostname).pem

      ssh admin@IP\numeng-aff300-05-06::*>  security certificate install -type client-ca -vserver umeng-aff300-05-06\n\nPlease enter Certificate: Press <Enter> when done\n-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n\nYou should keep a copy of the CA-signed digital certificate for future reference.\n\nThe installed certificate's CA and serial number for reference:\nCA: cbg\nSerial: B77B59444444CCCC\n\nThe certificate's generated name for reference: cbg_B77B59444444CCCC\n

      Now that the client certificate is installed, let's enable it.

      umeng-aff300-05-06::*> ssl modify -vserver umeng-aff300-05-06 -client-enabled true\n  (security ssl modify)\n

Verify with a recent version of curl. If you are running on a Mac, see below.

      curl --cacert umeng-aff300-05-06.crt --key cert/$(hostname).key --cert cert/$(hostname).pem https://umeng-aff300-05-06-cm.rtp.openenglab.netapp.com/api/storage/disks\n
      "},{"location":"prepare-cdot-clusters/#update-harvestyml-to-use-client-certificates","title":"Update Harvest.yml to use client certificates","text":"

      Update the poller section with auth_style: certificate_auth like this:

        u2-cert: \n    auth_style: certificate_auth\n    addr: umeng-aff300-05-06-cm.rtp.openenglab.netapp.com\n
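If you did not name the key/cert pair after the hostname, the poller can point at the files explicitly with ssl_cert and ssl_key. Here is a minimal sketch, written to a scratch file so you can merge it into your harvest.yml by hand; the /opt/harvest/cert paths are assumptions.

cat << 'EOF' > /tmp/u2-cert-poller.yml\nPollers:\n  u2-cert:\n    auth_style: certificate_auth\n    addr: umeng-aff300-05-06-cm.rtp.openenglab.netapp.com\n    ssl_cert: /opt/harvest/cert/my-client.pem   # hypothetical path to the client certificate\n    ssl_key: /opt/harvest/cert/my-client.key    # hypothetical path to the client key\nEOF\n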

Restart your poller and enjoy your password-less lifestyle.
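Something like the following does that, assuming the poller is named u2-cert as in the example above:

bin/harvest restart u2-cert\n\n# confirm the poller came back up\nbin/harvest status\n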

      "},{"location":"prepare-cdot-clusters/#macos","title":"macOS","text":"

      The version of curl installed on macOS up through Monterey is not recent enough to work with self-signed SAN certs. You will need to install a newer version of curl via Homebrew, MacPorts, source, etc.

Example of failure when running with an older version of curl - you will see this in the client auth test step above.

      curl --version\ncurl 7.64.1 (x86_64-apple-darwin20.0) libcurl/7.64.1 (SecureTransport) LibreSSL/2.8.3 zlib/1.2.11 nghttp2/1.41.0\n\ncurl --cacert umeng-aff300-05-06.crt --key cert/cgrindst-mac-0.key --cert cert/cgrindst-mac-0.pem https://umeng-aff300-05-06-cm.rtp.openenglab.netapp.com/api/storage/disks\n\ncurl: (60) SSL certificate problem: unable to get local issuer certificate\n

      Let's install curl via Homebrew. Make sure you don't miss the message that Homebrew prints about your path.

      If you need to have curl first in your PATH, run:\n  echo 'export PATH=\"/usr/local/opt/curl/bin:$PATH\"' >> /Users/cgrindst/.bash_profile\n

      Now when we make a client auth request with our self-signed certificate, it works! \\o/

      brew install curl\n\ncurl --version\ncurl 7.80.0 (x86_64-apple-darwin20.6.0) libcurl/7.80.0 (SecureTransport) OpenSSL/1.1.1l zlib/1.2.11 brotli/1.0.9 zstd/1.5.0 libidn2/2.3.2 libssh2/1.10.0 nghttp2/1.46.0 librtmp/2.3 OpenLDAP/2.6.0\nRelease-Date: 2021-11-10\nProtocols: dict file ftp ftps gopher gophers http https imap imaps ldap ldaps mqtt pop3 pop3s rtmp rtsp scp sftp smb smbs smtp smtps telnet tftp \nFeatures: alt-svc AsynchDNS brotli GSS-API HSTS HTTP2 HTTPS-proxy IDN IPv6 Kerberos Largefile libz MultiSSL NTLM NTLM_WB SPNEGO SSL TLS-SRP UnixSockets zstd\n\ncurl --cacert umeng-aff300-05-06.crt --key cert/cgrindst-mac-0.key --cert cert/cgrindst-mac-0.pem https://umeng-aff300-05-06-cm.rtp.openenglab.netapp.com/api/storage/disks\n\n{\n  \"records\": [\n    {\n      \"name\": \"1.1.22\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/storage/disks/1.1.22\"\n        }\n      }\n    }\n}\n

      Change directory to your Harvest home directory (replace /opt/harvest/ if this is not the default):

      $ cd /opt/harvest/\n

Generate an SSL cert and key pair with the following command. Note that it's preferred to generate these files using the hostname of the local machine. The command below assumes debian8 as our hostname and harvest2 as the user we created in the previous step:

      openssl req -x509 -nodes -days 1095 -newkey rsa:2048 -keyout cert/debian8.key \\\n -out cert/debian8.pem  -subj \"/CN=harvest2\"\n

Next, open the public key (debian8.pem in our example) and copy all of its content. Log in to your ONTAP CLI and run this command, replacing CLUSTER with the name of your cluster.

      security certificate install -type client-ca -vserver CLUSTER\n

      Paste the public key content and hit enter. Output should be similar to this:

      jamaica::> security certificate install -type client-ca -vserver jamaica \n\nPlease enter Certificate: Press <Enter> when done\n-----BEGIN CERTIFICATE-----                       \nMIIDETCCAfmgAwIBAgIUP9EUXyl2BDSUOkNEcDU0yqbJ29IwDQYJKoZIhvcNAQEL\nBQAwGDEWMBQGA1UEAwwNaGFydmVzdDItY2xpMzAeFw0yMDEwMDkxMjA0MDhaFw0y\nMzEwMDktcGFueSBMdGQxFzAVBgNVBAMlc3QyLWNsaTMwggEiMA0tcGFueSBGCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVVy25BeCRoGCJWFOlyUL7Ddkze4Hl2/6u\nqye/3mk5vBNsGuXUrtad5XfBB70Ez9hWl5sraLiY68ro6MyX1icjiUTeaYDvS/76\nIw7HeXJ5Pyb/fWth1nePunytoLyG/vaTCySINkIV5nlxC+k0X3wWFJdfJzhloPtt\n1Vdm7aCF2q6a2oZRnUEBGQb6t5KyF0/Xh65mvfgB0pl/AS2HY5Gz+~L54Xyvs+BY\nV7UmTop7WBYl0L3QXLieERpHXnyOXmtwlm1vG5g4n/0DVBNTBXjEdvc6oRh8sxBN\nZlQWRApE7pa/I1bLD7G2AiS4UcPmR4cEpPRVEsOFOaAN3Z3YskvnAgMBAAGjUzBR\nMB0GA1UdDgQWBBQr4syV6TCcgO/5EcU/F8L2YYF15jAfBgNVHSMEGDAWgBQr4syV\n6TCcgO/5EcU/F8L2YYF15jAPBgNVHRMdfdfwerH/MA0GCSqGSIb^ECd3DQEBCwUA\nA4IBAQBjP1BVhClRKkO/M3zlWa2L9Ztce6SuGwSnm6Ebmbs+iMc7o2N9p3RmV6Xl\nh6NcdXRzzPAVrUoK8ewhnBzdghgIPoCI6inAf1CUhcCX2xcnE/osO+CfvKuFnPYE\nWQ7UNLsdfka0a9kTK13r3GMs09z/VsDs0gD8UhPjoeO7LQhdU9tJ/qOaSP3s48pv\nsYzZurHUgKmVOaOE4t9DAdevSECEWCETRETA$Vbn%@@@%%rcdrctru65ryFaByb+\nhTtGhDnoHwzt/cAGvLGV/RyWdGFAbu7Fb1rV94ceggE7nh1FqbdLH9siot6LlnQN\nMhEWp5PYgndOW49dDYUxoauCCkiA\n-----END CERTIFICATE-----\n\n\nYou should keep a copy of the CA-signed digital certificate for future reference.\n\nThe installed certificate's CA and serial number for reference:\nCA: harvest2\nSerial: 3FD1145F2976043012213d3009095534CCRDBD2\n\nThe certificate's generated name for reference: harvest2\n

      Finally, we need to enable SSL authentication with the following command (replace CLUSTER with the name of your cluster):

      security ssl modify -client-enabled true -vserver CLUSTER\n
      "},{"location":"prepare-cdot-clusters/#reference","title":"Reference","text":"
      • https://github.com/jcbsmpsn/golang-https-example
      "},{"location":"prepare-fsx-clusters/","title":"Amazon FSx for ONTAP","text":""},{"location":"prepare-fsx-clusters/#prepare-amazon-fsx-for-ontap","title":"Prepare Amazon FSx for ONTAP","text":"

To set up Harvest and FSx, make sure you read through Monitoring FSx for ONTAP file systems using Harvest and Grafana.

      "},{"location":"prepare-fsx-clusters/#supported-harvest-dashboards","title":"Supported Harvest Dashboards","text":"

      Amazon FSx for ONTAP exposes a different set of metrics than ONTAP cDOT. That means a limited set of out-of-the-box dashboards are supported and some panels may be missing information.

      The dashboards that work with FSx are tagged with fsx and listed below:

      • ONTAP: cDOT
      • ONTAP: Cluster
      • ONTAP: Data Protection Snapshots
      • ONTAP: Datacenter
      • ONTAP: FlexGroup
      • ONTAP: LUN
      • ONTAP: NFS Troubleshooting
      • ONTAP: Quota
      • ONTAP: Security
      • ONTAP: SVM
      • ONTAP: Volume
      • ONTAP: Volume by SVM
      • ONTAP: Volume Deep Dive
      "},{"location":"prepare-storagegrid-clusters/","title":"StorageGRID","text":""},{"location":"prepare-storagegrid-clusters/#prepare-storagegrid-cluster","title":"Prepare StorageGRID cluster","text":"

NetApp Harvest requires login credentials to access StorageGRID hosts. Although a generic admin account can be used, it is better to create a dedicated monitoring user with the fewest permissions.

Here's a summary of what we're going to do:

1. Create a StorageGRID group with the necessary capabilities that Harvest will use to authenticate and collect data
      2. Create a user assigned to the group created in step #1.
      "},{"location":"prepare-storagegrid-clusters/#create-storagegrid-group-permissions","title":"Create StorageGRID group permissions","text":"

      These steps are documented here.

      You will need a root or admin account to create a new group permission.

      1. Select CONFIGURATION > Access control > Admin groups
      2. Select Create group
      3. Select Local group
      4. Enter a display name for the group, which you can update later as required. For example, Harvest or monitoring.
      5. Enter a unique name for the group, which you cannot update later.
      6. Select Continue
      7. On the Manage group permissions screen, select the permissions you want. At a minimum, Harvest requires the Tenant accounts and Metrics query permissions.
      8. Select Save changes

      "},{"location":"prepare-storagegrid-clusters/#create-a-storagegrid-user","title":"Create a StorageGRID user","text":"

      These steps are documented here.

      You will need a root or admin account to create a new user.

      1. Select CONFIGURATION > Access control > Admin users
      2. Select Create user
      3. Enter the user\u2019s full name, a unique username, and a password.
      4. Select Continue.
      5. Assign the user to the previously created harvest group.
      6. Select Create user and select Finish.

      "},{"location":"prepare-storagegrid-clusters/#reference","title":"Reference","text":"

      See group permissions for more information on StorageGRID permissions.

      "},{"location":"prometheus-exporter/","title":"Prometheus Exporter","text":"Prometheus Install

The information below describes how to set up Harvest's Prometheus exporter. If you need help installing or setting up Prometheus, check out their documentation.

      "},{"location":"prometheus-exporter/#overview","title":"Overview","text":"

      The Prometheus exporter is responsible for:

      • formatting metrics into the Prometheus line protocol
      • creating a web-endpoint on http://<ADDR>:<PORT>/metrics (or https: if TLS is enabled) for Prometheus to scrape

      A web end-point is required because Prometheus scrapes Harvest by polling that end-point.

      In addition to the /metrics end-point, the Prometheus exporter also serves an overview of all metrics and collectors available on its root address scheme://<ADDR>:<PORT>/.
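For example, assuming a poller is exporting on port 12990 of the local machine, you can spot-check both end-points with curl:

# metrics in the Prometheus line protocol\ncurl -s http://localhost:12990/metrics | head\n\n# overview of available metrics and collectors\ncurl -s http://localhost:12990/\n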

      Because Prometheus polls Harvest, don't forget to update your Prometheus configuration and tell Prometheus how to scrape each poller.

      There are two ways to configure the Prometheus exporter: using a port range or individual ports.

      The port range is more flexible and should be used when you want multiple pollers all exporting to the same instance of Prometheus. Both options are explained below.

      "},{"location":"prometheus-exporter/#parameters","title":"Parameters","text":"

      All parameters of the exporter are defined in the Exporters section of harvest.yml.

      An overview of all parameters:

parameter type description default port_range int-int (range), overrides port if specified lower port to upper port (inclusive) of the HTTP end-point to create when a poller specifies this exporter. Starting at lower port, each free port will be tried sequentially up to the upper port. port int, required if port_range is not specified port of the HTTP end-point local_http_addr string, optional address of the HTTP server Harvest starts for Prometheus to scrape: use localhost to serve only on the local machine; use 0.0.0.0 (default) if Prometheus is scraping from another machine 0.0.0.0 global_prefix string, optional add a prefix to all metrics (e.g. netapp_) allow_addrs list of strings, optional allow access only if host matches any of the provided addresses allow_addrs_regex list of strings, optional allow access only if host address matches at least one of the regular expressions cache_max_keep string (Go duration format), optional maximum amount of time metrics are cached (in case Prometheus does not timely collect the metrics) 5m add_meta_tags bool, optional add HELP and TYPE metatags to metrics (currently no useful information, but required by some tools) false sort_labels bool, optional sort metric labels before exporting. Some open-metrics scrapers report stale metrics when labels are not sorted. false tls tls optional If present, enables TLS transport. If running in a container, see note tls cert_file, key_file required child of tls Relative or absolute path to TLS certificate and key file. TLS 1.3 certificates required. FIPS-compliant P-256 TLS 1.3 certificates can be created with bin/harvest admin tls create server, openssl, mkcert, etc.

      A few examples:

      "},{"location":"prometheus-exporter/#port_range","title":"port_range","text":"
      Exporters:\n  prom-prod:\n    exporter: Prometheus\n    port_range: 2000-2030\nPollers:\n  cluster-01:\n    exporters:\n      - prom-prod\n  cluster-02:\n    exporters:\n      - prom-prod\n  cluster-03:\n    exporters:\n      - prom-prod\n  # ... more\n  cluster-16:\n    exporters:\n      - prom-prod\n

      Sixteen pollers will collect metrics from 16 clusters and make those metrics available to a single instance of Prometheus named prom-prod. Sixteen web end-points will be created on the first 16 available free ports between 2000 and 2030 (inclusive).

After starting the pollers in the example above, running bin/harvest status shows the following. Note that ports 2000 and 2003 were not available, so the next free port in the range was selected. If no free port can be found, an error is logged.

      Datacenter   Poller       PID     PromPort  Status              \n++++++++++++ ++++++++++++ +++++++ +++++++++ ++++++++++++++++++++\nDC-01        cluster-01   2339    2001      running         \nDC-01        cluster-02   2343    2002      running         \nDC-01        cluster-03   2351    2004      running         \n...\nDC-01        cluster-14   2405    2015      running         \nDC-01        cluster-15   2502    2016      running         \nDC-01        cluster-16   2514    2017      running         \n
      "},{"location":"prometheus-exporter/#allow_addrs","title":"allow_addrs","text":"
      Exporters:\n  my_prom:\n    allow_addrs:\n      - 192.168.0.102\n      - 192.168.0.103\n

      will only allow access from exactly these two addresses.

      "},{"location":"prometheus-exporter/#allow_addrs_regex","title":"allow_addrs_regex","text":"
      Exporters:\n  my_prom:\n    allow_addrs_regex:\n      - `^192.168.0.\\d+$`\n

will only allow access from the IPv4 range 192.168.0.0-192.168.0.255.

      "},{"location":"prometheus-exporter/#configure-prometheus-to-scrape-harvest-pollers","title":"Configure Prometheus to scrape Harvest pollers","text":"

      There are two ways to tell Prometheus how to scrape Harvest: using HTTP service discovery (SD) or listing each poller individually.

      HTTP service discovery is the more flexible of the two. It is also less error-prone, and easier to manage. Combined with the port_range configuration described above, SD is the least effort to configure Prometheus and the easiest way to keep both Harvest and Prometheus in sync.

      NOTE HTTP service discovery does not work with Docker yet. With Docker, you will need to list each poller individually or if possible, use the Docker Compose workflow that uses file service discovery to achieve a similar ease-of-use as HTTP service discovery.

      See the example below for how to use HTTP SD and port_range together.

      "},{"location":"prometheus-exporter/#prometheus-http-service-discovery","title":"Prometheus HTTP Service Discovery","text":"

      HTTP service discovery was introduced in Prometheus version 2.28.0. Make sure you're using that version or later.

      The way service discovery works is:

      • shortly after a poller starts up, it registers with the SD node (if one exists)
      • the poller sends a heartbeat to the SD node, by default every 45s.
      • if a poller fails to send a heartbeat, the SD node removes the poller from the list of active targets after a minute
• the SD end-point is reachable at scheme://<ADDR>:<PORT>/api/v1/sd (for example, http://localhost:8887/api/v1/sd)

        To use HTTP service discovery you need to:

        1. tell Harvest to start the HTTP service discovery process
        2. tell Prometheus to use the HTTP service discovery endpoint
        "},{"location":"prometheus-exporter/#enable-http-service-discovery-in-harvest","title":"Enable HTTP service discovery in Harvest","text":"

        Add the following to your harvest.yml

        Admin:\n  httpsd:\n    listen: :8887\n

        This tells Harvest to create an HTTP service discovery end-point on interface 0.0.0.0:8887. If you want to only listen on localhost, use 127.0.0.1:<port> instead. See net.Dial for details on the supported listen formats.

        Start the SD process by running bin/harvest admin start. Once it is started, you can curl the end-point for the list of running Harvest pollers.

        curl -s 'http://localhost:8887/api/v1/sd' | jq .\n[\n  {\n    \"targets\": [\n      \"10.0.1.55:12990\",\n      \"10.0.1.55:15037\",\n      \"127.0.0.1:15511\",\n      \"127.0.0.1:15008\",\n      \"127.0.0.1:15191\",\n      \"10.0.1.55:15343\"\n    ]\n  }\n]\n
        "},{"location":"prometheus-exporter/#harvest-http-service-discovery-options","title":"Harvest HTTP Service Discovery options","text":"

        HTTP service discovery (SD) is configured in the Admin > httpsd section of your harvest.yml.
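For example, the sketch below writes a fragment to a scratch file that you would merge into the Admin section of your harvest.yml by hand; the 90s and 3m values are only illustrative, and the full list of options is in the table below.

cat << 'EOF' > /tmp/httpsd-admin.yml\nAdmin:\n  httpsd:\n    listen: :8887\n    heart_beat: 90s     # pollers send a heartbeat every 90s instead of the default 45s\n    expire_after: 3m    # drop a poller from the target list after 3m without a heartbeat\nEOF\n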

parameter type description default listen required Interface and port to listen on, use localhost:PORT or :PORT for all interfaces auth_basic optional If present, enables basic authentication on /api/v1/sd end-point auth_basic username, password required child of auth_basic tls optional If present, enables TLS transport. If running in a container, see note tls cert_file, key_file required child of tls Relative or absolute path to TLS certificate and key file. TLS 1.3 certificates required. FIPS-compliant P-256 TLS 1.3 certificates can be created with bin/harvest admin tls create server ssl_cert, ssl_key optional if auth_style is certificate_auth Absolute paths to SSL (client) certificate and key used to authenticate with the target system. If not provided, the poller will look for <hostname>.key and <hostname>.pem in $HARVEST_HOME/cert/. To create certificates for ONTAP systems, see using certificate authentication heart_beat optional, Go Duration format How frequently each poller sends a heartbeat message to the SD node 45s expire_after optional, Go Duration format If a poller fails to send a heartbeat, the SD node removes the poller after this duration 1m"},{"location":"prometheus-exporter/#enable-http-service-discovery-in-prometheus","title":"Enable HTTP service discovery in Prometheus","text":"

        Edit your prometheus.yml and add the following section

        $ vim /etc/prometheus/prometheus.yml

        scrape_configs:\n  - job_name: harvest\n    http_sd_configs:\n      - url: http://localhost:8887/api/v1/sd\n

        Harvest and Prometheus both support basic authentication for HTTP SD end-points. To enable basic auth, add the following to your Harvest config.

        Admin:\n  httpsd:\n    listen: :8887\n    # Basic auth protects GETs and publishes\n    auth_basic:\n      username: admin\n      password: admin\n

        Don't forget to also update your Prometheus config with the matching basic_auth credentials.
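You can confirm the Harvest side is protected by hitting the SD end-point with and without credentials; the admin/admin pair below matches the example config above and should obviously be changed in practice.

# without credentials the request should be rejected\ncurl -s -o /dev/null -w '%{http_code}' http://localhost:8887/api/v1/sd\n\n# with matching credentials the target list is returned\ncurl -s -u admin:admin http://localhost:8887/api/v1/sd | jq .\n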

        "},{"location":"prometheus-exporter/#prometheus-http-service-discovery-and-port-range","title":"Prometheus HTTP Service Discovery and Port Range","text":"

        HTTP SD combined with Harvest's port_range feature leads to significantly less configuration in your harvest.yml. For example, if your clusters all export to the same Prometheus instance, you can refactor the per-poller exporter into a single exporter shared by all clusters in Defaults as shown below:

        Notice that none of the pollers specify an exporter. Instead, all the pollers share the single exporter named prometheus-r listed in Defaults. prometheus-r is the only exporter defined and as specified will manage up to 1,000 Harvest Prometheus exporters.

        If you add or remove more clusters in the Pollers section, you do not have to change Prometheus since it dynamically pulls the targets from the Harvest admin node.

        Admin:\n  httpsd:\n    listen: :8887\n\nExporters:\n  prometheus-r:\n    exporter: Prometheus\n    port_range: 13000-13999\n\nDefaults:\n  collectors:\n    - Zapi\n    - ZapiPerf\n  use_insecure_tls: false\n  auth_style: password\n  username: admin\n  password: pass\n  exporters:\n    - prometheus-r\n\nPollers:\n  umeng_aff300:\n    datacenter: meg\n    addr: 10.193.48.11\n\n  F2240-127-26:\n    datacenter: meg\n    addr: 10.193.6.61\n\n  # ... add more clusters\n
        "},{"location":"prometheus-exporter/#static-scrape-targets","title":"Static Scrape Targets","text":"

If we define two Prometheus exporters at ports 12990 and 14567 in the harvest.yml file, like so, you need to add two targets to your prometheus.yml as well.

        $ vim harvest.yml\n
        Exporters:\n  prometheus1:\n    exporter: Prometheus\n    port: 12990\n  prometheus2:\n    exporter: Prometheus\n    port: 14567\n\nPollers:\n  cluster1:\n    addr: 10.0.1.1\n    username: user\n    password: pass\n    exporters:\n      - prometheus1\n  cluster2:\n      addr: 10.0.1.1\n      username: user\n      password: pass\n      exporters:\n        - prometheus2\n
        $ vim /etc/prometheus/prometheus.yml\n

        Scroll down to near the end of the file and add the following lines:

          - job_name: 'harvest'\n    scrape_interval: 60s\n    static_configs:\n      - targets:\n          - 'localhost:12990'\n          - 'localhost:14567'\n

NOTE If Prometheus is not on the same machine as Harvest, replace localhost with the IP address of your Harvest machine. Also note that the scrape interval above is set to 60s (one minute), which matches the polling frequency of the default Harvest collectors. If you change the polling frequency of a Harvest collector to a lower value, you should also change the scrape interval.

        "},{"location":"prometheus-exporter/#prometheus-exporter-and-tls","title":"Prometheus Exporter and TLS","text":"

        The Harvest Prometheus exporter can be configured to serve its metrics via HTTPS by configuring the tls section in the Exporters section of harvest.yml.

        Let's walk through an example of how to set up Harvest's Prometheus exporter and how to configure Prometheus to use TLS.

        "},{"location":"prometheus-exporter/#generate-tls-certificates","title":"Generate TLS Certificates","text":"

We'll use Harvest's admin command line tool to create a self-signed TLS certificate/key pair for the exporter and Prometheus. Note: If running in a container, see note.

        cd $Harvest_Install_Directory\nbin/harvest admin tls create server\n2023/06/23 09:39:48 wrote cert/admin-cert.pem\n2023/06/23 09:39:48 wrote cert/admin-key.pem\n

        Two files are created. Since we want to use these certificates for our Prometheus exporter, let's rename them to make that clearer.

        mv cert/admin-cert.pem cert/prom-cert.pem\nmv cert/admin-key.pem cert/prom-key.pem\n
        "},{"location":"prometheus-exporter/#configure-harvest-prometheus-exporter-to-use-tls","title":"Configure Harvest Prometheus Exporter to use TLS","text":"

        Edit your harvest.yml and add a TLS section to your exporter block like this:

        Exporters:\n  my-exporter:\n    local_http_addr: localhost\n    exporter: Prometheus\n    port: 16001\n    tls:\n      cert_file: cert/prom-cert.pem\n      key_file: cert/prom-key.pem\n

        Update one of your Pollers to use this exporter and start the poller.

        Pollers:\n  my-cluster:\n    datacenter: dc-1\n    addr: 10.193.48.11\n    exporters:\n      - my-exporter     # Use TLS exporter we created above\n

When the poller is started, it will log whether https or http is being used as part of the URL, like so:

        bin/harvest start -f my-cluster\n2023-06-23T10:02:03-04:00 INF prometheus/httpd.go:40 > server listen Poller=my-cluster exporter=my-exporter url=https://localhost:16001/metrics\n

If the URL scheme is https, TLS is being used.

        You can use curl to scrape the Prometheus exporter and verify that TLS is being used like so:

curl --cacert cert/prom-cert.pem https://localhost:16001/metrics\n\n# or use --insecure to tell curl to skip certificate validation\n# curl --insecure https://localhost:16001/metrics\n
        "},{"location":"prometheus-exporter/#configure-prometheus-to-use-tls","title":"Configure Prometheus to use TLS","text":"

Let's configure Prometheus to use HTTPS to communicate with the exporter set up above.

Edit your prometheus.yml and add or adapt your scrape_configs job. You need to add scheme: https and set up a tls_config block pointing to the prom-cert.pem created earlier, like so:

        scrape_configs:\n  - job_name: 'harvest-https'\n    scheme: https\n    tls_config:\n      ca_file: /path/to/prom-cert.pem\n    static_configs:\n    - targets:\n        - 'localhost:16001'\n
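Before restarting Prometheus, you can optionally sanity-check the edited file with promtool, which ships alongside the Prometheus binary; the path below is an assumption, adjust it to your installation.

./promtool check config /path/to/prometheus.yml\n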

        Start Prometheus and visit http://localhost:9090/targets with your browser. You should see https://localhost:16001/metrics in the list of targets.

        "},{"location":"prometheus-exporter/#prometheus-alerts","title":"Prometheus Alerts","text":"

        Prometheus includes out-of-the-box support for simple alerting. Alert rules are configured in your prometheus.yml file. Setup and details can be found in the Prometheus guide on alerting.

        Harvest also includes EMS and sample alerts for reference. Refer to the EMS Collector for more details about EMS events. Refer to the EMS alert runbook for descriptions and remediation steps.

        "},{"location":"prometheus-exporter/#alertmanager","title":"Alertmanager","text":"

Prometheus's built-in alerts are good for simple workflows. They do a nice job of telling you what's happening at the moment. If you need a richer solution that includes summarization, notification, advanced delivery, deduplication, etc., check out Alertmanager.

        "},{"location":"prometheus-exporter/#reference","title":"Reference","text":"
        • Prometheus Alerting
        • Alertmanager
        • Alertmanager's notification metrics
        • Prometheus Linter
        • Collection of example Prometheus Alerts
        "},{"location":"quickstart/","title":"Quickstart","text":"

        Welcome to the NetApp Harvest Getting Started Guide. This tutorial will guide you through the steps required to deploy an instance of NetApp Harvest, Prometheus, and Grafana on a Linux platform to monitor an ONTAP cluster.

This tutorial uses systemd to manage Harvest, Prometheus, and Grafana. If you would rather run the processes directly, feel free to ignore the sections of the tutorial that set up systemd service files.

        "},{"location":"quickstart/#1-set-installation-path","title":"1. Set Installation Path","text":"

        First, set the installation path as an environment variable. For example, we'll use /opt/netapp/harvest.

        HARVEST_INSTALL_PATH=/opt/netapp/harvest\nmkdir -p ${HARVEST_INSTALL_PATH}\n
        "},{"location":"quickstart/#2-install-harvest","title":"2. Install Harvest","text":"

Harvest is distributed as a container, a native tarball, and RPM and Deb packages. Pick the one that works best for you. More details can be found in the installation documentation.

        For this guide, we'll use the tarball package as an example.

        Visit the releases page and take note of the latest release. Update the HARVEST_VERSION environment variable with the latest release in the script below. For example, to download the 24.05.2 release you would use HARVEST_VERSION=24.05.2
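If you prefer to look up the latest tag from the command line instead of the releases page, something like this works (requires jq; strip the leading v from the printed tag before using it as HARVEST_VERSION):

curl -s https://api.github.com/repos/NetApp/harvest/releases/latest | jq -r '.tag_name'\n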

After updating the HARVEST_VERSION environment variable, run the bash script below to download Harvest and untar it into your HARVEST_INSTALL_PATH directory.

        HARVEST_VERSION=24.05.2\ncd ${HARVEST_INSTALL_PATH}\nwget https://github.com/NetApp/harvest/releases/download/v${HARVEST_VERSION}/harvest-${HARVEST_VERSION}-1_linux_amd64.tar.gz\ntar -xvf harvest-${HARVEST_VERSION}-1_linux_amd64.tar.gz\n
        "},{"location":"quickstart/#3-install-prometheus","title":"3. Install Prometheus","text":"

        To install Prometheus, follow these steps. For more details see Prometheus installation.

        PROMETHEUS_VERSION=2.49.1\ncd ${HARVEST_INSTALL_PATH}\nwget https://github.com/prometheus/prometheus/releases/download/v${PROMETHEUS_VERSION}/prometheus-${PROMETHEUS_VERSION}.linux-amd64.tar.gz\ntar -xvf prometheus-${PROMETHEUS_VERSION}.linux-amd64.tar.gz\nmv prometheus-${PROMETHEUS_VERSION}.linux-amd64 prometheus-${PROMETHEUS_VERSION}\n
        If you want to manage Prometheus with systemd, you can create a service file for Prometheus like so. This step is optional. A service file will attempt to restart Prometheus automatically when the machine is restarted.

        Create a service file for Prometheus:

        cat << EOF | sudo tee /etc/systemd/system/prometheus.service\n[Unit]\nDescription=Prometheus Server\nDocumentation=https://prometheus.io/docs/introduction/overview/\nAfter=network-online.target\n\n[Service]\nUser=root\nRestart=on-failure\nExecStart=${HARVEST_INSTALL_PATH}/prometheus-${PROMETHEUS_VERSION}/prometheus --config.file=${HARVEST_INSTALL_PATH}/prometheus-${PROMETHEUS_VERSION}/prometheus.yml\n\n[Install]\nWantedBy=multi-user.target\nEOF\n

        Reload the systemd configuration and start Prometheus:

        sudo systemctl daemon-reload\nsudo systemctl enable prometheus\nsudo systemctl start prometheus\n

        Check if Prometheus is up and running:

        sudo systemctl status prometheus\n

        You should see output indicating that the Prometheus service is active and running.

        Alternative: Start Prometheus Directly If you would rather start Prometheus directly and kick the tires before creating a service file, you can run the following command to start Prometheus in the background:
        nohup ${HARVEST_INSTALL_PATH}/prometheus-${PROMETHEUS_VERSION}/prometheus --config.file=${HARVEST_INSTALL_PATH}/prometheus-${PROMETHEUS_VERSION}/prometheus.yml > prometheus.log 2>&1 &\n
        This command uses nohup to run Prometheus in the background and redirects the output to prometheus.log."},{"location":"quickstart/#4-install-grafana","title":"4. Install Grafana","text":"

        To install Grafana, follow these steps:

        GRAFANA_VERSION=10.4.5\ncd ${HARVEST_INSTALL_PATH}\nwget https://dl.grafana.com/oss/release/grafana-${GRAFANA_VERSION}.linux-amd64.tar.gz\ntar -xvf grafana-${GRAFANA_VERSION}.linux-amd64.tar.gz\n

        If you want to manage Grafana with systemd, you can create a service file for Grafana like so. This step is optional. A service file will attempt to restart Grafana automatically when the machine is restarted.

        Create a service file for Grafana:

        cat << EOF | sudo tee /etc/systemd/system/grafana.service\n[Unit]\nDescription=Grafana Server\nDocumentation=https://grafana.com/docs/grafana/latest/setup-grafana/installation/\nAfter=network-online.target\n\n[Service]\nUser=root\nRestart=on-failure\nExecStart=${HARVEST_INSTALL_PATH}/grafana-v${GRAFANA_VERSION}/bin/grafana-server --config=${HARVEST_INSTALL_PATH}/grafana-v${GRAFANA_VERSION}/conf/defaults.ini --homepath=${HARVEST_INSTALL_PATH}/grafana-v${GRAFANA_VERSION}\n\n[Install]\nWantedBy=multi-user.target\nEOF\n

        Reload the systemd configuration and start Grafana:

        sudo systemctl daemon-reload\nsudo systemctl enable grafana\nsudo systemctl start grafana\n

        Check if Grafana is up and running:

        sudo systemctl status grafana\n

        You should see output indicating that the Grafana service is active and running.

        Alternative: Start Grafana Directly If you would rather start Grafana directly and kick the tires before creating a service file, you can run the following command to start Grafana in the background:
        nohup ${HARVEST_INSTALL_PATH}/grafana-v${GRAFANA_VERSION}/bin/grafana-server --config=${HARVEST_INSTALL_PATH}/grafana-v${GRAFANA_VERSION}/conf/defaults.ini --homepath=${HARVEST_INSTALL_PATH}/grafana-v${GRAFANA_VERSION} > grafana.log 2>&1 &\n
        This command uses nohup to run Grafana in the background and redirects the output to grafana.log."},{"location":"quickstart/#5-configuration-file","title":"5. Configuration File","text":"

        By default, Harvest loads its configuration information from the ./harvest.yml file. If you would rather use a different file, use the --config command line argument flag to specify the path to your config file.
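For example, to run a one-off status check against a config stored somewhere else (the path below is just an illustration):

bin/harvest --config /etc/harvest/harvest.yml status\n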

To start collecting metrics, you need to define at least one poller and one exporter in your configuration file. The default configuration file already includes an example entry that monitors resource usage by Harvest itself; it serves as a good example, and you can delete it if you want.

        The next step is to add pollers for your ONTAP clusters in the Pollers section of the Harvest configuration file, harvest.yml.

        Edit the Harvest configuration file:

        cd ${HARVEST_INSTALL_PATH}/harvest-${HARVEST_VERSION}-1_linux_amd64\nvi harvest.yml\n

        Copy and paste the following YAML configuration into your editor and update the $cluster-management-ip, $username, and $password sections to match your ONTAP system.

        Exporters:\n  prometheus1:\n    exporter: Prometheus\n    port_range: 13000-13100\n\nDefaults:\n  collectors:\n    - Zapi\n    - ZapiPerf\n    - Ems\n    - Rest\n    - RestPerf\n  use_insecure_tls: true\n\nPollers:\n  jamaica:\n    datacenter: DC-01\n    addr: $cluster-management-ip\n    auth_style: basic_auth\n    username: $username\n    password: $password\n    exporters:\n      - prometheus1\n

        Note: The ONTAP user specified in this configuration must have the appropriate permissions as outlined in the Prepare cDot Clusters documentation.

        "},{"location":"quickstart/#6-edit-prometheus-config-file","title":"6. Edit Prometheus Config File","text":"

        Edit the Prometheus configuration file:

        cd ${HARVEST_INSTALL_PATH}/prometheus-${PROMETHEUS_VERSION}\nvi prometheus.yml\n

        Add the following under the scrape_configs section. The targets you are adding should match the range of ports you specified in your harvest.yml file (in the example above, we use the port_range 13000-13100).

          - job_name: 'harvest'\n    static_configs:\n      - targets: ['localhost:13000', 'localhost:13001', 'localhost:13002']  # Add ports as defined in the port range\n

        For example, if your port range in the Harvest configuration is 13000-13100, you should add the ports within this range that you plan to use.

        Restart Prometheus to apply the changes:

        sudo systemctl restart prometheus\n

        Check if Prometheus is up and running:

        sudo systemctl status prometheus\n
        "},{"location":"quickstart/#7-start-harvest","title":"7. Start Harvest","text":"

        To start the Harvest pollers, follow these steps. For more details see Harvest service.

        Create a systemd service file for Harvest pollers:

        cat << EOF | sudo tee /etc/systemd/system/poller@.service\n[Unit]\nDescription=\"NetApp Harvest Poller instance %I\"\nPartOf=harvest.target\nAfter=network-online.target\nWants=network-online.target\n\n[Service]\nUser=harvest\nGroup=harvest\nType=simple\nRestart=on-failure\nExecStart=${HARVEST_INSTALL_PATH}/harvest-${HARVEST_VERSION}-1_linux_amd64/bin/harvest --config ${HARVEST_INSTALL_PATH}/harvest-${HARVEST_VERSION}-1_linux_amd64/harvest.yml start -f %i\n\n[Install]\nWantedBy=harvest.target\nEOF\n

        Create a target file for Harvest:

        cd ${HARVEST_INSTALL_PATH}/harvest-${HARVEST_VERSION}-1_linux_amd64\nbin/harvest generate systemd | sudo tee /etc/systemd/system/harvest.target\n

        Reload the systemd configuration and start Harvest:

        sudo systemctl daemon-reload\nsudo systemctl enable harvest.target\nsudo systemctl start harvest.target\n

        Verify that the pollers have started successfully by checking their status:

        systemctl status \"poller*\"\n
        Alternative: Start Harvest Directly If you would rather start Harvest directly and kick the tires before creating a service file, you can run the following command to start Harvest:
        cd ${HARVEST_INSTALL_PATH}/harvest-${HARVEST_VERSION}-1_linux_amd64\nbin/harvest start\n
        Verify that the pollers have started successfully by checking their status:
        bin/harvest status\n
        The output should look similar to this:
        Datacenter | Poller  |   PID   | PromPort | Status\n-----------+---------+---------+----------+----------\nDC-01      | jamaica | 1280145 |    13000 | running\n

        The logs of each poller can be found in /var/log/harvest/.

        "},{"location":"quickstart/#8-add-prometheus-datasource-in-grafana","title":"8. Add Prometheus Datasource in Grafana","text":"

        To add a Prometheus datasource in Grafana, follow these steps:

        1. Open your web browser and navigate to Grafana (http://localhost:3000). When prompted for credentials, use Grafana defaults admin/admin. You should change the default credentials once you log in.
        2. Navigate to the data sources section by visiting http://localhost:3000/connections/datasources or by clicking the hamburger menu (three horizontal lines) at the top-left of the page and navigate to Connections and then Data Sources.
        3. Click on Add data source.
        4. Select Prometheus from the list of available data sources.
        5. In the Prometheus server URL field, enter (http://localhost:9090).
        6. Click on Save and test.
        7. At the bottom of the page, you should see the message 'Successfully queried the Prometheus API.' For detailed instructions, please refer to the configure Prometheus Data Source documentation.
        "},{"location":"quickstart/#9-generate-grafana-api-token","title":"9. Generate Grafana API Token","text":"

        To import Grafana dashboards using the bin/harvest grafana import command, you need a Grafana API token. Follow these steps to generate it:

        1. Open your web browser and navigate to Grafana (http://localhost:3000). Enter your Grafana credentials to log in. The default username and password are admin.
        2. Click the hamburger menu (three horizontal lines) at the top-left of the page and Navigate to Administration -> Users and access and then select Service Account.
        3. Click on Add Service Account.
        4. Enter the display name Harvest.
        5. Set the role to Editor.
        6. Click on Create. The service account will appear in the dashboard.
        7. Navigate back to Service Account.
        8. Click on Add service account token for the Harvest service account.
        9. Click on Generate Token.
        10. Click on Copy to clipboard and close.

        IMPORTANT: This is the only opportunity to save the token. Immediately paste it into a text file and save it. The token will be needed by Harvest later on.

        For detailed instructions, please refer to the Grafana API Keys documentation.

        "},{"location":"quickstart/#10-import-grafana-dashboards","title":"10. Import Grafana Dashboards","text":"

        To import Grafana dashboards, use the following command:

        cd ${HARVEST_INSTALL_PATH}/harvest-${HARVEST_VERSION}-1_linux_amd64\nbin/harvest grafana import --token YOUR_TOKEN_HERE\n

Replace YOUR_TOKEN_HERE with the token you saved in the previous section (Generate Grafana API Token).

You will be prompted to save your API key (token) for later use. Press n if you do not want to save the token in your harvest.yml file.

        After a few seconds, all the dashboards will be imported into Grafana.

        "},{"location":"quickstart/#9-verify-dashboards-in-grafana","title":"9. Verify Dashboards in Grafana","text":"

        After adding the Prometheus datasource, you can verify that your dashboards are correctly displaying data. Follow these steps:

        1. Open your web browser and navigate to Grafana (http://localhost:3000). Enter your Grafana credentials to log in. The default username and password are admin.
        2. Click on the \"three lines\" button (also known as the hamburger menu) in the top left corner of the Grafana interface. From the menu, select Dashboards.
        3. Open the Volume dashboard. Once the dashboard opens, you should see volume data displayed.
        "},{"location":"quickstart/#troubleshooting","title":"Troubleshooting","text":"

        If you encounter issues, check the logs in /var/log/harvest and refer to the troubleshooting section on the wiki. You can also reach out for help on Discord or via email at ng-harvest-files@netapp.com.

        "},{"location":"quickstart/#conclusion","title":"Conclusion","text":"

        \ud83c\udf8a Congratulations! You have successfully set up NetApp Harvest, Prometheus, and Grafana. Enjoy monitoring your systems and feel free to reach out on Discord, GitHub, or email.

        "},{"location":"release-notes/","title":"Release Notes","text":"
        • Changelog
        • Releases
        "},{"location":"system-requirements/","title":"System Requirements","text":"

        Harvest is written in Go, which means it runs on recent Linux systems. It also runs on Macs for development.

Hardware requirements depend on how many clusters you monitor and the number of metrics you choose to collect. With the default configuration, when monitoring 10 clusters, we recommend:

        • CPU: 2 cores
        • Memory: 1 GB
        • Disk: 500 MB (mostly used by log files)

Note: These CPU, memory, and disk requirements are just for Harvest and do not include Prometheus, InfluxDB, or Grafana.

        Harvest is compatible with:

        • Prometheus: 2.33 or higher
        • InfluxDB: v2
        • Grafana: 8.1.X or higher
        • Docker: 20.10.0 or higher and compatible Docker Compose
        "},{"location":"architecture/rest-collector/","title":"REST collector","text":""},{"location":"architecture/rest-collector/#status","title":"Status","text":"

        ~~Accepted~~ Superseded by REST strategy

The exact version of ONTAP that has full ZAPI parity is subject to change. Everywhere you see version 9.12, it may become 9.13 or later.

        "},{"location":"architecture/rest-collector/#context","title":"Context","text":"

We need to document and communicate to customers:
• when they should switch from the ZAPI collectors to the REST ones
• what versions of ONTAP are supported by Harvest's REST collectors
• how to fill ONTAP gaps between the ZAPI and REST APIs

        The ONTAP version information is important because gaps are addressed in later versions of cDOT.

        "},{"location":"architecture/rest-collector/#considered-options","title":"Considered Options","text":"
1. Only REST: A clean cut-over; stop using ZAPI and switch completely to REST.

2. Both: Support both ZAPI and REST collectors running at the same time, collecting the same objects. Flexible, but has the downside of last-write wins. Not recommended unless you selectively pick non-overlapping sets of objects.

3. Template change that supports both: Change the template to break ties, priority, etc. Rejected because the additional complexity is not worth the benefits.

4. private-cli: When there are REST gaps that have not been filled yet or will never be filled (WONTFIX), the Harvest REST collector will provide infrastructure and documentation on how to use private-cli pass-through to address gaps.

        "},{"location":"architecture/rest-collector/#chosen-decision","title":"Chosen Decision","text":"

        For clusters with ONTAP versions < 9.12, we recommend customers use the ZAPI collectors. (#2) (#4)

        Once ONTAP 9.12+ is released and customers have upgraded to it, they should make a clean cut-over to the REST collectors (#1). ONTAP 9.12 is the version of ONTAP that has the best parity with what Harvest collects in terms of config and performance counters. Harvest REST collectors, templates, and dashboards are validated against ONTAP 9.12+. Most of the REST config templates will work before 9.12, but unless you have specific needs, we recommend sticking with the ZAPI collectors until you upgrade to 9.12.

        There is little value in running both the ZAPI and REST collectors for an overlapping set of objects. It's unlikely you want to collect the same object via REST and ZAPI at the same time. Harvest doesn't support this use-case, but does nothing to detect or prevent it.

        If you want to collect a non-overlapping set of objects with REST and ZAPI, you can. If you do, we recommend you disable the ZAPI object collector. For example, if you enable the REST disk template, you should disable the ZAPI disk template. We do NOT recommend collecting an overlapping set of objects with both collectors since the last one to run will overwrite previously collected data.

        Harvest will document how to use the REST private cli pass-through to collect custom and non-public counters.
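As a rough illustration of what the ONTAP REST private CLI pass-through looks like outside of Harvest, a CLI command such as volume show maps to a URL path under /api/private/cli; the cluster address, credentials, and field names below are placeholders.

# roughly equivalent to the CLI command 'volume show -fields size'\ncurl -sk -u admin:password 'https://CLUSTER-MGMT-IP/api/private/cli/volume?fields=size' | jq .\n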

The Harvest team recommends that customers open ONTAP issues for REST public API gaps that need to be filled.

        "},{"location":"architecture/rest-collector/#consequences","title":"Consequences","text":"

The Harvest REST collectors will work with limitations on earlier versions of ONTAP. ONTAP 9.12+ is the minimally validated version. We only validate the full set of templates, dashboards, counters, etc., on ONTAP 9.12+.

        Harvest does not prevent you from collecting the same resource with ZAPI and REST.

        "},{"location":"architecture/rest-strategy/","title":"REST Strategy","text":""},{"location":"architecture/rest-strategy/#status","title":"Status","text":"

        Accepted

        "},{"location":"architecture/rest-strategy/#context","title":"Context","text":"

ONTAP has published a customer product communiqu\u00e9 (CPC-00410) announcing that ZAPIs will reach end of availability (EOA) in ONTAP 9.13.1, released in Q2 2023.

        This document describes how Harvest handles the ONTAP transition from ZAPI to REST. In most cases, no action is required on your part.

        "},{"location":"architecture/rest-strategy/#harvest-api-transition","title":"Harvest API Transition","text":"

        Harvest tries to use the protocol you specify in your harvest.yml config file.

        When specifying the ZAPI collector, Harvest will use the ZAPI protocol unless the cluster no longer speaks Zapi, in which case, Harvest will switch to REST.

        If you specify the REST collector, Harvest will use the REST protocol.

        Harvest includes a full set of REST templates that export identical metrics as the included ZAPI templates. No changes to dashboards or downstream metric-consumers should be required. See below if you have added metrics to the Harvest out-of-the-box templates.

        Read on if you want to know how you can use REST sooner, or you want to take advantage of REST-only features in ONTAP.

        "},{"location":"architecture/rest-strategy/#frequently-asked-questions","title":"Frequently Asked Questions","text":""},{"location":"architecture/rest-strategy/#how-does-harvest-decide-whether-to-use-rest-or-zapi-apis","title":"How does Harvest decide whether to use REST or ZAPI APIs?","text":"

        Harvest attempts to use the collector defined in your harvest.yml config file.

        • If you specify the ZAPI collector, Harvest will use the ZAPI protocol as long as the cluster still speaks Zapi. If the cluster no longer understands Zapi, Harvest will switch to Rest.

        • If you specify the REST collector, Harvest will use REST.

        Earlier versions of Harvest included a prefer_zapi poller option and a HARVEST_NO_COLLECTOR_UPGRADE environment variable. Both of these options are ignored in Harvest versions 23.08 onwards.

        "},{"location":"architecture/rest-strategy/#why-would-i-switch-to-rest-before-9131","title":"Why would I switch to REST before 9.13.1?","text":"
        • You have advanced use cases to validate before ONTAP removes ZAPIs
        • You want to take advantage of new ONTAP features that are only available via REST (e.g., cloud features, event remediation, name services, cluster peers, etc.)
        • You want to collect a metric that is not available via ZAPI
        • You want to collect a metric from the ONTAP CLI. The REST API includes a private CLI pass-through to access any ONTAP CLI command (see the example after this list)
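
        As a minimal, hypothetical illustration of the pass-through, the following reads the output of the ONTAP CLI command volume show -fields size,state over REST. The cluster address and credentials are placeholders; adjust the command path and fields for your use case.

        curl -s -k -u admin 'https://CLUSTER_IP/api/private/cli/volume?fields=size,state'\n
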
        "},{"location":"architecture/rest-strategy/#can-i-start-using-rest-before-9131","title":"Can I start using REST before 9.13.1?","text":"

        Yes. Many customers do. Be aware of the following limitations:

        1. ONTAP includes a subset of performance counters via REST beginning in ONTAP 9.11.1.
        2. There may be performance metrics missing from versions of ONTAP earlier than 9.11.1.

        Where performance metrics are concerned, because of point #2, our recommendation is to wait until at least ONTAP 9.12.1 before switching to the RestPerf collector. You can continue using the ZapiPerf collector until you switch.

        "},{"location":"architecture/rest-strategy/#a-counter-is-missing-from-rest-what-do-i-do","title":"A counter is missing from REST. What do I do?","text":"

        The Harvest team has ensured that all the out-of-the-box ZAPI templates have matching REST templates with identical metrics as of Harvest 22.11 and ONTAP 9.12.1. Any additional ZAPI Perf counters you have added may be missing from ONTAP REST Perf.

        Join the Harvest Discord channel and ask us about the counter. Sometimes we may know which release the missing counter is coming in; otherwise, we can point you to the ONTAP process for requesting new counters.

        "},{"location":"architecture/rest-strategy/#can-i-use-the-rest-and-zapi-collectors-at-the-same-time","title":"Can I use the REST and ZAPI collectors at the same time?","text":"

        Yes. Harvest ensures that duplicate resources are not collected from both collectors.

        When there is potential duplication, Harvest first resolves the conflict in the order collectors are defined in your poller and then negotiates with the cluster on the most appropriate API to use per above.

        Let's take a look at a few examples using the following poller definition:

        cluster-1:\n    datacenter: dc-1\n    addr: 10.1.1.1\n    collectors:\n        - Zapi\n        - Rest\n
        • When cluster-1 is running ONTAP 9.9.X (ONTAP still supports ZAPIs), the Zapi collector will be used since it is listed first. When collecting a REST-only resource like nfs_client, the Rest collector will be used since nfs_client objects are only available via REST.

        • When cluster-1 is running ONTAP 9.18.1 (ONTAP no longer supports ZAPIs), the Rest collector will be used since ONTAP can no longer speak the ZAPI protocol.

        If you want the REST collector to be used in all cases, change the order in the collectors section so Rest comes before Zapi.
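
        For example, reordering the earlier poller definition so Rest is tried first:

        cluster-1:\n    datacenter: dc-1\n    addr: 10.1.1.1\n    collectors:\n        - Rest\n        - Zapi\n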

        If the resource does not exist for the first collector, the next collector will be tried. Using the example above, when collecting VolumeAnalytics resources, the Zapi collector will not run for VolumeAnalytics objects since that resource is only available via REST. The Rest collector will run and collect the VolumeAnalytics objects.

        "},{"location":"architecture/rest-strategy/#ive-added-counters-to-existing-zapi-templates-will-those-counters-work-in-rest","title":"I've added counters to existing ZAPI templates. Will those counters work in REST?","text":"

        ZAPI config metrics often have a REST equivalent that can be found in ONTAP's ONTAPI to REST mapping document.

        ZAPI performance metrics may be missing in REST. If you have added new metrics or templates to the ZapiPerf collector, those metrics likely aren't available via REST. You can check if the performance counter is available, ask the Harvest team on Discord, or ask ONTAP to add the counter you need.

        "},{"location":"architecture/rest-strategy/#reference","title":"Reference","text":"

        Table of ONTAP versions, dates and API notes.

        • 9.11.1 (Q2 2022): First version of ONTAP with REST performance metrics
        • 9.12.1 (Q4 2022): ZAPIs still supported. REST performance metrics have parity with Harvest 22.11 collected ZAPI performance metrics
        • 9.13.1: ZAPIs still supported
        • 9.14.1-9.15.1: ZAPIs enabled if an ONTAP upgrade detects they were being used earlier. New ONTAP installs default to REST only. ZAPIs may be enabled via CLI
        • 9.16.1-9.17.1: ZAPIs disabled. See ONTAP communique for details on re-enabling
        • 9.18.1: ZAPIs removed. No way to re-enable

        "},{"location":"help/config-collection/","title":"Harvest Config Collection Guide","text":"

        This guide is designed to help you validate your Harvest configuration (harvest.yml) on various platforms. The commands in this guide will generate redacted output with personally identifiable information (PII) removed. This makes it safe for you to share the output. Follow the instructions specific to your platform. If you wish to share it with the Harvest team, please email them at ng-harvest-files@netapp.com.

        "},{"location":"help/config-collection/#rpm-deb-and-native-installations","title":"RPM, DEB, and Native Installations","text":"

        To print a redacted version of your Harvest configuration to the console, use the following command:

        cd /opt/harvest\nexport CONFIG_FILE_NAME=harvest.yml\nbin/harvest doctor --print --config $CONFIG_FILE_NAME\n
        "},{"location":"help/config-collection/#docker-container","title":"Docker Container","text":"

        For Docker containers, use the following command to print a redacted version of your Harvest configuration to the console:

        cd to/where/your/harvest.yml/is\nexport CONFIG_FILE_NAME=harvest.yml\ndocker run --rm --entrypoint \"bin/harvest\" --volume \"$(pwd)/$CONFIG_FILE_NAME:/opt/harvest/harvest.yml\" ghcr.io/netapp/harvest doctor --print\n
        "},{"location":"help/config-collection/#nabox","title":"NABox","text":"

        If you're using NABox, you'll need to ssh into your NABox instance. Then, use the following command to print a redacted version of your Harvest configuration to the console:

        dc exec -w /conf nabox-harvest2 /netapp-harvest/bin/harvest doctor --print\n

        If your configuration file name is different from the default harvest.yml, remember to change the CONFIG_FILE_NAME environment variable to match your file name.

        "},{"location":"help/faq/","title":"FAQ","text":""},{"location":"help/faq/#how-do-i-migrate-from-harvest-16-to-20","title":"How do I migrate from Harvest 1.6 to 2.0?","text":"

        There is currently no tool to migrate data from Harvest 1.6 to 2.0. The most common workaround is to run both 1.6 and 2.0 in parallel until the 1.6 data expires due to the normal retention policy, and then fully cut over to 2.0.

        Technically, it's possible to take a Graphite DB, extract the data, and send it to a Prometheus DB, but it's not an area we've invested in. If you want to explore that option, check out promtool, which supports importing, but it's probably not worth the effort.

        "},{"location":"help/faq/#how-do-i-share-sensitive-log-files-with-netapp","title":"How do I share sensitive log files with NetApp?","text":"

        Email them to ng-harvest-files@netapp.com. This mail address is accessible to NetApp Harvest employees only.

        "},{"location":"help/faq/#multi-tenancy","title":"Multi-tenancy","text":""},{"location":"help/faq/#question","title":"Question","text":"

        Is there a way to allow per SVM level user views? I need to offer 1 tenant per SVM. Can I limit visibility to specific SVMs? Is there an SVM dashboard available?

        "},{"location":"help/faq/#answer","title":"Answer","text":"

        You can do this with Grafana. Harvest can provide the labels for SVMs. The pieces are there but need to be put together.

        Grafana templates support the $__user variable to make pre-selections and decisions. You can use that variable, plus metadata mapping users to SVMs, to build SVM-specific dashboards.

        There is a German service provider who is doing this. They have service managers responsible for a set of customers, and each manager only wants to see the data/dashboards of their corresponding customers.

        "},{"location":"help/faq/#harvest-authentication-and-permissions","title":"Harvest Authentication and Permissions","text":""},{"location":"help/faq/#question_1","title":"Question","text":"

        What permissions does Harvest need to talk to ONTAP?

        "},{"location":"help/faq/#answer_1","title":"Answer","text":"

        Permissions, authentication, role based security, and creating a Harvest user are covered here.

        "},{"location":"help/faq/#ontap-counters-are-missing","title":"ONTAP counters are missing","text":""},{"location":"help/faq/#question_2","title":"Question","text":"

        How do I make Harvest collect additional ONTAP counters?

        "},{"location":"help/faq/#answer_2","title":"Answer","text":"

        Instead of modifying the out-of-the-box templates in the conf/ directory, it is better to create your own custom templates following these instructions.

        "},{"location":"help/faq/#capacity-metrics","title":"Capacity Metrics","text":""},{"location":"help/faq/#question_3","title":"Question","text":"

        How are capacity and other metrics calculated by Harvest?

        "},{"location":"help/faq/#answer_3","title":"Answer","text":"

        Each collector has its own way of collecting and post-processing metrics. Check the documentation of each individual collector (usually under the #Metrics section). Capacity and hardware-related metrics are collected by the Zapi collector, which emits metrics as they are, without any additional calculation. Performance metrics are collected by the ZapiPerf collector, and the final values are calculated from the delta of two consecutive polls.
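
        As a simplified illustration of the delta calculation (the numbers are made up): if one poll reads a cumulative counter of 1,000,000 ops and the next poll, 60 seconds later, reads 1,060,000 ops, the exported rate is (1,060,000 - 1,000,000) / 60 = 1,000 ops per second.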

        "},{"location":"help/faq/#tagging-volumes","title":"Tagging Volumes","text":""},{"location":"help/faq/#question_4","title":"Question","text":"

        How do I tag ONTAP volumes with metadata and surface that data in Harvest?

        "},{"location":"help/faq/#answer_4","title":"Answer","text":"

        See volume tagging issue and volume tagging via sub-templates

        "},{"location":"help/faq/#rest-and-zapi-documentation","title":"REST and Zapi Documentation","text":""},{"location":"help/faq/#question_5","title":"Question","text":"

        How do I relate ONTAP REST endpoints to ZAPI APIs and attributes?

        "},{"location":"help/faq/#answer_5","title":"Answer","text":"

        Please refer to the ONTAPI to REST API mapping document.

        "},{"location":"help/faq/#sizing","title":"Sizing","text":"

        How much disk space is required by Prometheus?

        This depends on the collectors you've added, # of nodes monitored, cardinality of labels, # instances, retention, ingest rate, etc. A good approximation is to curl your Harvest exporter and count the number of samples that it publishes and then feed that information into a Prometheus sizing formula.

        Prometheus stores an average of 1-2 bytes per sample. To plan the capacity of a Prometheus server, you can use the rough formula: needed_disk_space = retention_time_seconds * ingested_samples_per_second * bytes_per_sample
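
        As a minimal sketch of applying that formula (assuming a single poller publishing on port 12990 and a 60-second scrape interval; the sample count below is an arbitrary example):

        # count the samples currently published by one poller (non-comment lines)\ncurl -s http://localhost:12990/metrics | grep -vc '^#'\n\n# e.g. 10,000 samples scraped every 60s, kept for one year at 2 bytes per sample:\n# 31,536,000 s * (10,000 / 60) samples/s * 2 bytes = ~10.5 GB\n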

        A rough approximation is outlined at https://devops.stackexchange.com/questions/9298/how-to-calculate-disk-space-required-by-prometheus-v2-2

        "},{"location":"help/faq/#topk-usage-in-grafana","title":"Topk usage in Grafana","text":""},{"location":"help/faq/#question_6","title":"Question","text":"

        In Grafana, why do I see more results from topk than I asked for?

        "},{"location":"help/faq/#answer_6","title":"Answer","text":"

        Topk is one of Prometheus's out-of-the-box aggregation operators, and is used to calculate the largest k elements by sample value.

        Depending on the time range you select, Prometheus will often return more results than you asked for. That's because Prometheus picks the topk at each point in time on the graph. In other words, different time series are in the topk at different times across the graph. When you use a large duration, there are often many such time series.
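
        As a hypothetical illustration, a panel query such as topk(5, volume_total_ops) evaluated over a 24-hour range can legitimately plot more than five series, because a different set of five volumes may be the busiest at different points within that range (volume_total_ops is used here only as an example metric name).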

        This is a limitation of Prometheus and can be mitigated by:

        • reducing the time range to a smaller duration that includes fewer topk results - something like a five to ten minute range works well for most of Harvest's charts
        • the panel's table shows the current topk rows and that data can be used to supplement the additional series shown in the charts

        Additional details: here, here, and here

        "},{"location":"help/faq/#where-are-harvest-container-images-published","title":"Where are Harvest container images published?","text":"

        Harvest container images are published to both GitHub's image registry (ghcr.io) and Docker's image registry (hub.docker.com). By default, ghcr.io is used for pulling images.

        Please note that cr.netapp.io is no longer being maintained. If you have been using cr.netapp.io to pull Harvest images, we encourage you to switch to ghcr.io or Docker Hub as your container image registry. Starting in 2024, we will cease publishing Harvest container images to cr.netapp.io.

        "},{"location":"help/faq/#how-do-i-switch-between-image-registries","title":"How do I switch between image registries?","text":""},{"location":"help/faq/#answer_7","title":"Answer","text":"

        Replace all instances of rahulguptajss/harvest:latest with ghcr.io/netapp/harvest:latest:

        • Edit your docker-compose file and make those replacements or regenerate the compose file.

        • Update any shell or Ansible scripts you have that are also using those images

        • After making these changes, you should stop your containers, pull new images, and restart.

        You can verify that you're using the GitHub Container Registry images like so:

        Before

        docker image ls -a\nREPOSITORY                  TAG       IMAGE ID       CREATED        SIZE\nrahulguptajss/harvest       latest    80061bbe1c2c   10 days ago    56.4MB <=== Docker Hub\nprom/prometheus             v2.33.1   e528f02c45a6   3 weeks ago    204MB\ngrafana/grafana             8.3.4     4a34578e4374   5 weeks ago    274MB\n

        Pull image from GitHub Container Registry

        docker pull ghcr.io/netapp/harvest:latest\nUsing default tag: latest\nlatest: Pulling from ghcr.io/netapp/harvest\nDigest: sha256:6ff88153812ebb61e9dd176182bf8a792cde847748c5654d65f4630e61b1f3ae\nStatus: Image is up to date for ghcr.io/netapp/harvest:latest\nghcr.io/netapp/harvest:latest\n

        Notice that the IMAGE IDs for both images are identical since the images are the same.

        docker image ls -a\nREPOSITORY                  TAG       IMAGE ID       CREATED        SIZE\nghcr.io/netapp/harvest      latest    80061bbe1c2c   10 days ago    56.4MB  <== Harvest image from GitHub Container Registry\nrahulguptajss/harvest       latest    80061bbe1c2c   10 days ago    56.4MB\nprom/prometheus             v2.33.1   e528f02c45a6   3 weeks ago    204MB\ngrafana/grafana             8.3.4     4a34578e4374   5 weeks ago    274MB\n

        We can now remove the Docker Hub pulled image

        docker image rm rahulguptajss/harvest:latest\nUntagged: rahulguptajss/harvest:latest\nUntagged: rahulguptajss/harvest@sha256:6ff88153812ebb61e9dd176182bf8a792cde847748c5654d65f4630e61b1f3ae\n\ndocker image ls -a\nREPOSITORY              TAG       IMAGE ID       CREATED        SIZE\nghcr.io/netapp/harvest   latest    80061bbe1c2c   10 days ago    56.4MB\nprom/prometheus         v2.33.1   e528f02c45a6   3 weeks ago    204MB\ngrafana/grafana         8.3.4     4a34578e4374   5 weeks ago    274MB\n
        "},{"location":"help/faq/#ports","title":"Ports","text":""},{"location":"help/faq/#what-ports-does-harvest-use","title":"What ports does Harvest use?","text":""},{"location":"help/faq/#answer_8","title":"Answer","text":"

        The default ports are shown in the following diagram.

        • Harvest's pollers use ZAPI or REST to communicate with ONTAP on port 443
        • Each poller exposes the Prometheus port defined in your harvest.yml file
        • Prometheus scrapes each poller-exposed Prometheus port (promPort1, promPort2, promPort3)
        • Prometheus's default port is 9090
        • Grafana's default port is 3000
        "},{"location":"help/faq/#snapmirror_labels","title":"Snapmirror_labels","text":""},{"location":"help/faq/#why-do-my-snapmirror_labels-have-an-empty-source_node","title":"Why do my snapmirror_labels have an empty source_node?","text":""},{"location":"help/faq/#answer_9","title":"Answer","text":"

        SnapMirror relationships have a source and a destination node. ONTAP, however, does not expose the source side of that relationship; only the destination side is returned via the ZAPI/REST APIs. Because of that, the Prometheus metric named snapmirror_labels will have an empty source_node label.

        The dashboards show the correct value for source_node since we join multiple metrics in the Grafana panels to synthesize that information.

        In short: don't rely on snapmirror_labels for source_node labels. If you need source_node, you will need to do a join similar to the one the SnapMirror dashboard does.

        See https://github.com/NetApp/harvest/issues/1192 for more information and linked pull requests for REST and ZAPI.

        "},{"location":"help/faq/#nfs-clients-dashboard","title":"NFS Clients Dashboard","text":""},{"location":"help/faq/#why-do-my-nfs-clients-dashboard-have-no-data","title":"Why do my NFS Clients Dashboard have no data?","text":""},{"location":"help/faq/#answer_10","title":"Answer","text":"

        The NFS Clients dashboard is only available through the REST collector; this information is not available through ZAPI. You must enable the REST collector in your harvest.yml config and uncomment the nfs_clients.yaml section in your default.yaml file.
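
        The exact object name may differ between Harvest versions, but the uncommented entry in conf/rest/default.yaml looks roughly like this (check your file for the commented-out line):

        objects:\n  NFSClients: nfs_clients.yaml\n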

        Note: Enabling nfs_clients.yaml may slow down data collection.

        "},{"location":"help/faq/#file-analytics-dashboard","title":"File Analytics Dashboard","text":""},{"location":"help/faq/#why-do-my-file-analytics-dashboard-have-no-data","title":"Why do my File Analytics Dashboard have no data?","text":""},{"location":"help/faq/#answer_11","title":"Answer","text":"

        This dashboard requires ONTAP 9.8+ and the APIs are only available via REST. Please enable the REST collector in your harvest config. To collect and display usage data such as capacity analytics, you need to enable File System Analytics on a volume. Please see https://docs.netapp.com/us-en/ontap/task_nas_file_system_analytics_enable.html for more details.
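
        For example, assuming an SVM named svm1 and a volume named vol1 (both placeholders), File System Analytics can be enabled from the ONTAP CLI like so:

        volume analytics on -vserver svm1 -volume vol1\n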

        "},{"location":"help/faq/#why-do-i-have-volume-sis-stat-panel-empty-in-volume-dashboard","title":"Why do I have Volume Sis Stat panel empty in Volume dashboard?","text":""},{"location":"help/faq/#answer_12","title":"Answer","text":"

        This panel requires ONTAP 9.12+ and the APIs are only available via REST. Enable the REST collector in your harvest.yml config.

        "},{"location":"help/log-collection/","title":"Harvest Logs Collection Guide","text":"

        This guide will help you collect Harvest logs on various platforms. Follow the instructions specific to your platform. If you would like to share the collected logs with the Harvest team, please email them to ng-harvest-files@netapp.com.

        If the files are too large to email, let us know at the address above or on Discord, and we'll send you a file sharing link to upload your files.

        "},{"location":"help/log-collection/#rpm-deb-and-native-installations","title":"RPM, DEB, and Native Installations","text":"

        For RPM, DEB, and native installations, use the following command to create a compressed tar file containing the logs:

        tar -czvf harvest_logs.tar.gz -C /var/log harvest\n

        This command will create a file named harvest_logs.tar.gz with the contents of the /var/log/harvest directory.

        "},{"location":"help/log-collection/#docker-container","title":"Docker Container","text":"

        For Docker containers, first, identify the container ID for your Harvest instance. Then, replace <container_id> with the actual container ID in the following command:

        docker logs <container_id> &> harvest_logs.txt && tar -czvf harvest_logs.tar.gz harvest_logs.txt\n

        This command will create a file named harvest_logs.tar.gz containing the logs from the specified container.

        "},{"location":"help/log-collection/#nabox-4","title":"NABox 4","text":"

        Collect a support bundle from the NABox web interface by clicking the About button in the left gutter and then clicking the Download Support Bundle button.

        "},{"location":"help/log-collection/#nabox-3","title":"NABox 3","text":"

        For NABox installations, ssh into your nabox instance, and use the following command to create a compressed tar file containing the logs:

        dc logs nabox-api > nabox-api.log; dc logs nabox-harvest2 > nabox-harvest2.log;\\\n  tar -czf nabox-logs-`date +%Y-%m-%d_%H:%M:%S`.tgz *\n

        This command will create a file named nabox-logs-$date.tgz containing the nabox-api and Harvest poller logs.

        For more information, see the NABox documentation on collecting logs

        "},{"location":"help/troubleshooting/","title":"Checklists for Harvest","text":"

        A set of steps to go through when something goes wrong.

        "},{"location":"help/troubleshooting/#what-version-of-ontap-do-you-have","title":"What version of ONTAP do you have?","text":"

        Run the following, replacing <poller> with the poller from your harvest.yml

        ./bin/harvest zapi -p <poller> show system\n

        Copy and paste the output into your issue. Here's an example:

        ./bin/harvest -p infinity show system\nconnected to infinity (NetApp Release 9.8P2: Tue Feb 16 03:49:46 UTC 2021)\n[results]                             -                                   *\n  [build-timestamp]                   -                          1613447386\n  [is-clustered]                      -                                true\n  [version]                           - NetApp Release 9.8P2: Tue Feb 16 03:49:46 UTC 2021\n  [version-tuple]                     -                                   *\n    [system-version-tuple]            -                                   *\n      [generation]                    -                                   9\n      [major]                         -                                   8\n      [minor]                         -                                   0\n

        "},{"location":"help/troubleshooting/#install-fails","title":"Install fails","text":"

        I tried to install and ...

        "},{"location":"help/troubleshooting/#how-do-i-tell-if-harvest-is-doing-anything","title":"How do I tell if Harvest is doing anything?","text":"

        You believe Harvest is installed fine, but it's not working.

        • Post the contents of your harvest.yml

        Try validating your harvest.yml with yamllint like so: yamllint -d relaxed harvest.yml. If you do not have yamllint installed, look here.

        There should be no errors - warnings like the following are fine:

        harvest.yml\n  64:1      warning  too many blank lines (3 > 0)  (empty-lines)\n

        • How did you start Harvest?

        • What do you see in /var/log/harvest/*

        • What does ps aux | grep poller show?

        • If you are using Prometheus, try hitting Harvest's Prometheus endpoint like so:

        curl http://machine-this-is-running-harvest:prometheus-port-in-harvest-yaml/metrics

        • Check file ownership (user/group) and file permissions of your templates, executable, etc. in your Harvest home directory (ls -la /opt/harvest/). See also.
        "},{"location":"help/troubleshooting/#how-do-i-start-harvest-in-debug-mode","title":"How do I start Harvest in debug mode?","text":"

        Use the --debug flag when starting a poller to enable debug logging (--debug is shorthand for --loglevel 1). Another useful flag is --foreground, which causes all log messages to be written to the terminal. Note that you can only start one poller in foreground mode.

        The amount of logged information can be controlled with the --loglevel flag followed by an integer value. The integer values are as follows:

        • 0: Trace
        • 1: Debug
        • 2: Info (default)
        • 3: Warning
        • 4: Error
        • 5: Critical

        Examples:

        bin/harvest start $POLLER_NAME --foreground --debug\nor\nbin/harvest start $POLLER_NAME --loglevel=1 --collectors Zapi --objects Qtree\n
        "},{"location":"help/troubleshooting/#how-do-i-start-harvest-in-foreground-mode","title":"How do I start Harvest in foreground mode?","text":"

        See How do I start Harvest in debug mode?

        "},{"location":"help/troubleshooting/#how-do-i-start-my-poller-with-only-one-collector","title":"How do I start my poller with only one collector?","text":"

        Since a poller will start a large number of collectors (each collector-object pair is treated as a collector), it is often hard to find the issue you are looking for in the abundance of log messages. It can therefore be useful to start a single collector-object pair when troubleshooting. You can use the --collectors and --objects flags for that. For example, start only the ZapiPerf collector with the SystemNode object:

        harvest start my_poller --collectors ZapiPerf --objects SystemNode

        (To find the correct object name, check the conf/COLLECTOR/default.yaml file of the collector.)

        "},{"location":"help/troubleshooting/#errors-in-the-log-file","title":"Errors in the log file","text":""},{"location":"help/troubleshooting/#some-of-my-clusters-are-not-showing-up-in-grafana","title":"Some of my clusters are not showing up in Grafana","text":"

        The logs show these errors:

        context deadline exceeded (Client.Timeout or context cancellation while reading body)\n\nand then for each volume\n\nskipped instance [9c90facd-3730-48f1-b55c-afacc35c6dbe]: not found in cache\n

        "},{"location":"help/troubleshooting/#workarounds","title":"Workarounds","text":"

        context deadline exceeded (Client.Timeout or context cancellation while reading body)

        means Harvest is timing out when talking to your cluster. This sometimes happens when you have a large number of resources (e.g. volumes).

        There are a few parameters that you can change to prevent this from happening. You can do this by editing the subtemplate of the affected resource. For example, you can add the parameters to conf/zapiperf/cdot/9.8.0/volume.yaml or conf/zapi/cdot/9.8.0/volume.yaml. If the errors happen for most of the resources, you can add them to the main template of the collector (conf/zapi/default.yaml or conf/zapiperf/default.yaml) to apply them to all objects.

        "},{"location":"help/troubleshooting/#client_timeout","title":"client_timeout","text":"

        Increase the client_timeout value by adding a client_timeout line at the beginning of the template, like so:

        # increase the timeout to 1 minute\nclient_timeout: 1m\n
        "},{"location":"help/troubleshooting/#batch_size","title":"batch_size","text":"

        Decrease the batch_size value by adding a batch_size line at the beginning of the template. The default value of this parameter is 500. By decreasing it, the collector will fetch less instances during each API request. Example:

        # decrease number of instances to 200 for each API request\nbatch_size: 200\n
        "},{"location":"help/troubleshooting/#schedule","title":"schedule","text":"

        If nothing else helps, you can increase the data poll interval of the collector (default is 1m for ZapiPerf and 3m for Zapi). You can do this either by adding a schedule attribute to the template or, if it already exists, by changing the - data line.

        Example for ZapiPerf:

        # increase data poll frequency to 2 minutes\nschedule:\n  - counter: 20m\n  - instance: 10m\n  - data: 2m\n
        Example for Zapi:

        # increase data poll frequency to 5 minutes\nschedule:\n  - instance: 10m\n  - data: 5m\n
        "},{"location":"help/troubleshooting/#prometheus-http-service-discovery-doesnt-work","title":"Prometheus HTTP Service Discovery doesn't work","text":"

        Some things to check:

        • Make sure the Harvest admin node is started via bin/harvest admin start and there are no errors printed to the console
        • Make sure your harvest.yml includes a valid Admin: section
        • Ensure bin/harvest doctor runs without error. If it does, include the output of bin/harvest doctor --print in Slack or your GitHub issue
        • Ensure your /etc/prometheus/prometheus.yml has a scrape config with http_sd_configs and that it points to the admin node's ip:port (a sample snippet follows this list)
        • Ensure there are no errors in your poller logs (/var/log/harvest) related to the poller publishing its Prometheus port to the admin node. Something like this should help narrow it down: grep -R -E \"error.*poller.go\" /var/log/harvest/
          • If you see errors like dial udp 1.1.1.1:80: connect: network is unreachable, make sure your machine has a default route setup for your main interface
        • If the admin node is running, your harvest.yml includes the Admin: section, and your pollers are using the Prometheus exporter you should be able to curl the admin node endpoint for a list of running Harvest pollers like this:
          curl -s -k https://localhost:8887/api/v1/sd | jq .\n[\n  {\n    \"targets\": [\n      \":12994\"\n    ],\n    \"labels\": {\n      \"__meta_poller\": \"F2240-127-26\"\n    }\n  },\n  {\n    \"targets\": [\n      \":39000\"\n    ],\n    \"labels\": {\n      \"__meta_poller\": \"simple1\"\n    }\n  }\n]\n
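
        An illustrative prometheus.yml scrape config using HTTP service discovery might look like the following, assuming the admin node is listening on https://localhost:8887 as in the curl example above (adjust the address, job name, and TLS settings for your environment):

        scrape_configs:\n  - job_name: harvest\n    http_sd_configs:\n      - url: https://localhost:8887/api/v1/sd\n        tls_config:\n          insecure_skip_verify: true\n
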
        "},{"location":"help/troubleshooting/#how-do-i-run-harvest-commands-in-nabox","title":"How do I run Harvest commands in NAbox?","text":"

        NAbox is a vApp running Alpine Linux and Docker. NAbox runs Harvest as a set of Docker containers. That means that to execute Harvest commands on NAbox, you need to exec into the Harvest container by following these steps.

        1. ssh into your NAbox instance

        2. Start bash in the Harvest container

        dc exec nabox-harvest2 bash\n

        You should see no errors and your prompt will change to something like root@nabox-harvest2:/app#

        Below are examples of running Harvest commands against a cluster named umeng-aff300-05-06. Replace with your cluster name as appropriate.

        # inside container\n\n> cat /etc/issue\nDebian GNU/Linux 10 \\n \\l\n\n> cd /netapp-harvest\nbin/harvest version\nharvest version 22.08.0-1 (commit 93db10a) (build date 2022-08-19T09:10:05-0400) linux/amd64\nchecking GitHub for latest... you have the latest \u2713\n\n# harvest.yml is found at /conf/harvest.yml\n\n> bin/zapi --poller umeng-aff300-05-06 show system\nconnected to umeng-aff300-05-06 (NetApp Release 9.9.1P9X3: Tue Apr 19 19:05:24 UTC 2022)\n[results]                                          -                                   *\n  [build-timestamp]                                -                          1650395124\n  [is-clustered]                                   -                                true\n  [version]                                        - NetApp Release 9.9.1P9X3: Tue Apr 19 19:05:24 UTC 2022\n  [version-tuple]                                  -                                   *\n    [system-version-tuple]                         -                                   *\n      [generation]                                 -                                   9\n      [major]                                      -                                   9\n      [minor]                                      -                                   1\n\nbin/zapi -p umeng-aff300-05-06 show data --api environment-sensors-get-iter --max 10000 > env-sensor.xml\n

        The env-sensor.xml file will be written to the /opt/packages/harvest2 directory on the host.

        If needed, you can scp that file off NAbox and share it with the Harvest team.

        "},{"location":"help/troubleshooting/#rest-collector-auth-errors","title":"Rest Collector Auth errors?","text":"

        If you are seeing errors like User is not authorized or not authorized for that command while using the REST collector, follow the steps below to make sure permissions are set correctly.

        1. Verify that the user has permissions for the relevant authentication method.

        security login show -vserver ROOT_VSERVER -user-or-group-name harvest2 -application http

        2. Verify that the user has read-only permissions to the API.
        security login role show -role harvest2-role\n

        3. Verify that an entry is present for the following command.
        vserver services web access show -role harvest2-role -name rest\n

        If it is missing, add an entry with the following command:

        vserver services web access create -vserver umeng-aff300-01-02 -name rest -role harvest2-role\n
        "},{"location":"help/troubleshooting/#why-do-i-have-gaps-in-my-dashboards","title":"Why do I have gaps in my dashboards?","text":"

        Here are possible reasons and things to check:

        • Prometheus scrape_interval found via (http://$promIP:9090/config)
        • Prometheus log files
        • Harvest collector scrape interval check your:
          • conf/zapi/default.yaml - default for config is 3m
          • conf/zapiperf/default.yaml - default of perf is 1m
        • Check your poller logs for any errors or lag messages
        • When using VictoriaMetrics, make sure your Prometheus exporter config includes sort_labels: true, since VictoriaMetrics will mark series stale if the label order changes between polls (a sample exporter definition follows this list).
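
        A sample Prometheus exporter definition with sort_labels enabled (the exporter name and port range below are arbitrary):

        Exporters:\n  prometheus1:\n    exporter: Prometheus\n    addr: 0.0.0.0\n    port_range: 2000-2030\n    sort_labels: true\n
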
        "},{"location":"help/troubleshooting/#nabox","title":"NABox","text":"

        For NABox installations, refer to the NABox documentation on troubleshooting:

        NABox Troubleshooting

        "},{"location":"install/containerd/","title":"Containerized Harvest on Mac using containerd","text":"

        Harvest runs natively on a Mac already. If you need that, git clone and use GOOS=darwin make build.

        This page describes how to run Harvest on your Mac in a containerized environment (Compose, K8, etc.). The documentation below uses Rancher Desktop, but lima works just as well. Keep in mind that both of them are considered alpha. They work, but are still undergoing a lot of change.

        "},{"location":"install/containerd/#setup","title":"Setup","text":"

        We're going to:

        • Install and start Rancher Desktop
        • (Optional) Create a Harvest Docker image by following Harvest's existing documentation
        • Generate a Compose file following Harvest's existing documentation
        • Concatenate the Prometheus/Grafana compose file with the Harvest compose file, since Rancher doesn't support multiple compose files yet
        • Fix up the concatenated file
        • Start containers

        Under the hood, Rancher is using lima. If you want to skip Rancher and use lima directly that works too.

        "},{"location":"install/containerd/#install-and-start-rancher-desktop","title":"Install and Start Rancher Desktop","text":"

        We'll use brew to install Rancher.

        brew install rancher\n

        After Rancher Desktop installs, start it (Cmd + Space, type: Rancher) and wait for it to start a VM and download images. Once everything is started, continue.

        "},{"location":"install/containerd/#create-harvest-docker-image","title":"Create Harvest Docker image","text":"

        You only need to create a new image if you've made changes to Harvest. If you just want to use the latest version of Harvest, skip this step.

        These are the same steps outlined in Building Harvest Docker Image, except we replace docker build with nerdctl, like so:

        source .harvest.env\nnerdctl build -f container/onePollerPerContainer/Dockerfile --build-arg GO_VERSION=${GO_VERSION} -t harvest:latest . --no-cache \n
        "},{"location":"install/containerd/#generate-a-harvest-compose-file","title":"Generate a Harvest compose file","text":"

        Follow the existing documentation to set up your harvest.yml file

        Create your harvest-compose.yml file like this:

        docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest \\\n  generate docker full \\\n  --output harvest-compose.yml # --image tag, if you built a new image above\n
        "},{"location":"install/containerd/#combine-prometheusgrafana-and-harvest-compose-file","title":"Combine Prometheus/Grafana and Harvest compose file","text":"

        Currently, nerdctl compose does not support running with multiple compose files, so we'll concatenate prom-stack.yml and harvest-compose.yml into one file and then fix it up.

        cat prom-stack.yml harvest-compose.yml > both.yml\n\n# jump to line 45 and remove redundant version and services lines (lines 45, 46, 47 should be removed)\n# fix indentation of remaining lines - in vim, starting at line 46\n# Shift V\n# Shift G\n# Shift .\n# Esc\n# Shift ZZ\n
        "},{"location":"install/containerd/#start-containers","title":"Start containers","text":"
        nerdctl compose -f both.yml up -d\n\nnerdctl ps -a\n\nCONTAINER ID    IMAGE                               COMMAND                   CREATED               STATUS    PORTS                       NAMES\nbd7131291960    docker.io/grafana/grafana:latest    \"/run.sh\"                 About a minute ago    Up        0.0.0.0:3000->3000/tcp      grafana\nf911553a14e2    docker.io/prom/prometheus:latest    \"/bin/prometheus --c\u2026\"    About a minute ago    Up        0.0.0.0:9090->9090/tcp      prometheus\n037a4785bfad    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:15007->15007/tcp    poller_simple7_v21.11.0513\n03fb951cfe26    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    59 seconds ago        Up        0.0.0.0:15025->15025/tcp    poller_simple25_v21.11.0513\n049d0d65b434    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:16050->16050/tcp    poller_simple49_v21.11.0513\n0b77dd1bc0ff    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:16067->16067/tcp    poller_u2_v21.11.0513\n1cabd1633c6f    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:15015->15015/tcp    poller_simple15_v21.11.0513\n1d78c1bf605f    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:15062->15062/tcp    poller_sandhya_v21.11.0513\n286271eabc1d    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:15010->15010/tcp    poller_simple10_v21.11.0513\n29710da013d4    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:12990->12990/tcp    poller_simple1_v21.11.0513\n321ae28637b6    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:15020->15020/tcp    poller_simple20_v21.11.0513\n39c91ae54d68    docker.io/library/cbg:latest        \"bin/poller --poller\u2026\"    About a minute ago    Up        0.0.0.0:15053->15053/tcp    poller_simple-53_v21.11.0513\n\nnerdctl logs poller_simple1_v21.11.0513\nnerdctl compose -f both.yml down\n\n# http://localhost:9090/targets   Prometheus\n# http://localhost:3000           Grafana\n# http://localhost:15062/metrics  Poller metrics\n
        "},{"location":"install/containers/","title":"Docker","text":""},{"location":"install/containers/#overview","title":"Overview","text":"

        Harvest is container-ready and supports several deployment options:

        • Stand-up Prometheus, Grafana, and Harvest via Docker Compose. Choose this if you want to hit the ground running. Installation, volume, and network mounts are handled automatically.

        • Stand-up Harvest via Docker Compose that offers more flexibility in configuration. Choose this if you only want to run Harvest containers. Since you pick-and-choose what gets built and how it's deployed, stronger familiarity with containers is recommended.

        • If you prefer Ansible, David Blackwell created an Ansible script that stands up Harvest, Grafana, and Prometheus.

        • Want to run Harvest on a Mac via containerd and Rancher Desktop? We got you covered.

        • K8 Deployment via Kompose

        "},{"location":"install/containers/#docker-compose","title":"Docker Compose","text":"

        This is a quick way to install and get started with Harvest. Follow the four steps below to:

        • Setup Harvest, Grafana, and Prometheus via Docker Compose
        • Harvest dashboards are automatically imported and setup in Grafana with a Prometheus data source
        • A separate poller container is created for each monitored cluster
        • All pollers are automatically added as Prometheus scrape targets
        "},{"location":"install/containers/#setup-harvestyml","title":"Setup harvest.yml","text":"
        • Create a harvest.yml file with your cluster details, below is an example with annotated comments. Modify as needed for your scenario.

        This config is using the Prometheus exporter port_range feature, so you don't have to manage the Prometheus exporter port mappings for each poller.

        Exporters:\n  prometheus1:\n    exporter: Prometheus\n    addr: 0.0.0.0\n    port_range: 2000-2030  # <====== adjust to be greater than equal to the number of monitored clusters\n\nDefaults:\n  collectors:\n    - Zapi\n    - ZapiPerf\n    - EMS\n  use_insecure_tls: true   # <====== adjust as needed to enable/disable TLS checks \n  exporters:\n    - prometheus1\n\nPollers:\n  infinity:                # <====== add your cluster(s) here, they use the exporter defined three lines above\n    datacenter: DC-01\n    addr: 10.0.1.2\n    auth_style: basic_auth\n    username: user\n    password: 123#abc\n  # next cluster ....  \n
        "},{"location":"install/containers/#generate-a-docker-compose-for-your-pollers","title":"Generate a Docker compose for your Pollers","text":"
        • Generate a Docker compose file from your harvest.yml
        docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest \\\n  generate docker full \\\n  --output harvest-compose.yml\n

        By default, the above command uses the Harvest configuration file (harvest.yml) located in the current directory. If you want to use a Harvest config from a different location, see below.

        What if my harvest configuration file is somewhere else or not named harvest.yml?

        Use the following docker run command, updating the HYML variable with the absolute path to your harvest.yml.

        HYML=\"/opt/custom_harvest.yml\"; \\\ndocker run --rm \\\n--env UID=$(id -u) --env GID=$(id -g) \\\n--entrypoint \"bin/harvest\" \\\n--volume \"$(pwd):/opt/temp\" \\\n--volume \"${HYML}:${HYML}\" \\\nghcr.io/netapp/harvest:latest \\\ngenerate docker full \\\n--output harvest-compose.yml \\\n--config \"${HYML}\"\n

        generate docker full does two things:

        1. Creates a Docker compose file with a container for each Harvest poller defined in your harvest.yml
        2. Creates a matching Prometheus service discovery file for each Harvest poller (located in container/prometheus/harvest_targets.yml). Prometheus uses this file to scrape the Harvest pollers (an illustrative snippet follows this list).
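
        An illustrative harvest_targets.yml in Prometheus file_sd format might look like the following; the real file is generated from the pollers and ports in your harvest.yml:

        - targets: ['u2:12990']\n
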
        "},{"location":"install/containers/#start-everything","title":"Start everything","text":"

        Bring everything up

        docker compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans\n
        "},{"location":"install/containers/#note-on-docker-logging-configuration","title":"Note on Docker Logging Configuration","text":"

        By default, Docker uses the json-file logging driver which does not limit the size of the logs. This can cause your system to run out of disk space. Docker provides several options for logging configuration, including different logging drivers and options for log rotation.

        Docker recommends using the local driver to prevent disk-exhaustion. More details can be found in Docker logging documentation

        "},{"location":"install/containers/#prometheus-and-grafana","title":"Prometheus and Grafana","text":"

        The prom-stack.yml compose file creates a frontend and backend network. Prometheus and Grafana publish their admin ports on the front-end network and are routable to the local machine. By default, the Harvest pollers are part of the backend network and also expose their Prometheus web end-points. If you do not want their end-points exposed, add the --port=false option to the generate sub-command in the previous step.

        "},{"location":"install/containers/#prometheus","title":"Prometheus","text":"

        After bringing up the prom-stack.yml compose file, you can check Prometheus's list of targets at http://IP_OF_PROMETHEUS:9090/targets.

        "},{"location":"install/containers/#customize-prometheuss-retention-time","title":"Customize Prometheus's Retention Time","text":"

        By default, prom-stack.yml is configured for a one year data retention period. To increase this, for example, to two years, you can create a specific configuration file and make your changes there. This prevents your custom settings from being overwritten if you regenerate the default prom-stack.yml file. Here's the process:

        • Copy the original prom-stack.yml to a new file named prom-stack-prod.yml:
        cp prom-stack.yml prom-stack-prod.yml\n
        • Edit prom-stack-prod.yml to include the extended data retention setting by updating the --storage.tsdb.retention.time=2y line under the Prometheus service's command section:
        command:\n  - '--config.file=/etc/prometheus/prometheus.yml'\n  - '--storage.tsdb.path=/prometheus'\n  - '--storage.tsdb.retention.time=2y'       # Sets data retention to 2 years\n  - '--web.console.libraries=/usr/share/prometheus/console_libraries'\n  - '--web.console.templates=/usr/share/prometheus/consoles'\n
        • Save the changes to prom-stack-prod.yml.

        Now, you can start your Docker containers with the updated configuration that includes the two-year data retention period by executing the command below:

        docker compose -f prom-stack-prod.yml -f harvest-compose.yml up -d --remove-orphans\n
        "},{"location":"install/containers/#grafana","title":"Grafana","text":"

        After bringing up the prom-stack.yml compose file, you can access Grafana at http://IP_OF_GRAFANA:3000.

        You will be prompted to create a new password the first time you log in. Grafana's default credentials are

        username: admin\npassword: admin\n
        "},{"location":"install/containers/#manage-pollers","title":"Manage pollers","text":""},{"location":"install/containers/#how-do-i-add-a-new-poller","title":"How do I add a new poller?","text":"
        1. Add poller to harvest.yml
        2. Regenerate compose file by running harvest generate
        3. Run docker compose up, for example,
        docker compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans\n
        "},{"location":"install/containers/#stop-all-containers","title":"Stop all containers","text":"
        docker compose -f prom-stack.yml -f harvest-compose.yml down\n

        If you encounter the following error message while attempting to stop your Docker containers using docker-compose down

        Error response from daemon: Conflict. The container name \"/poller-u2\" is already in use by container\n

        This error is likely due to running docker-compose down from a different directory than where you initially ran docker-compose up.

        To resolve this issue, make sure to run the docker-compose down command from the same directory where you ran docker-compose up. This will ensure that Docker can correctly match the container names and IDs with the directory you are working in. Alternatively, you can stop the Harvest, Prometheus, and Grafana containers by using the following command:

        docker ps -aq --filter \"name=prometheus\" --filter \"name=grafana\" --filter \"name=poller-\" | xargs docker stop | xargs docker rm\n

        Note: Deleting or stopping Docker containers does not remove the data stored in Docker volumes.

        "},{"location":"install/containers/#upgrade-harvest","title":"Upgrade Harvest","text":"

        Note: If you want to keep your historical Prometheus data, and you set up your Docker Compose workflow before Harvest 22.11, please read how to migrate your Prometheus volume before continuing with the upgrade steps below.

        If you need to customize your Prometheus configuration, such as changing the data retention period, please refer to the instructions on customizing the Prometheus configuration.

        To upgrade Harvest:

        1. Retrieve the most recent version of the Harvest Docker image by executing the following command. This is needed since the new version may contain new templates, dashboards, or other files not included in the Docker image.

          docker pull ghcr.io/netapp/harvest\n

        2. Stop all containers

        3. Regenerate your harvest-compose.yml file by running harvest generate. Make sure you don't skip this step. It is essential as it updates local copies of templates and dashboards, which are then mounted to the containers. If this step is skipped, Harvest will run with older templates and dashboards which will likely cause problems. By default, generate will use the latest tag. If you want to upgrade to a nightly build see the twisty.

          I want to upgrade to a nightly build

          Tell the generate cmd to use a different tag like so:

          docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest:nightly \\\n  generate docker full \\\n  --image ghcr.io/netapp/harvest:nightly \\\n  --output harvest-compose.yml\n
        4. Restart your containers using the following:

          docker compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans\n
          Troubleshooting

          If you encounter the following error:

          network harvest_backend was found but has incorrect label com.docker.compose.network set to \"harvest_backend\"\n

          Remove the conflicting networks:

          docker network rm harvest_backend harvest_frontend\n

          Then, restart your containers again using the command above.

        "},{"location":"install/containers/#building-harvest-docker-image","title":"Building Harvest Docker Image","text":"

        Building a custom Harvest Docker image is only necessary if you require a tailored solution. If your intention is to run Harvest using Docker without any customizations, please refer to the Overview section above.

        source .harvest.env\ndocker build -f container/onePollerPerContainer/Dockerfile --build-arg GO_VERSION=${GO_VERSION} -t harvest:latest . --no-cache\n
        "},{"location":"install/harvest-containers/","title":"Harvest containers","text":"

        Follow this method if your goal is to establish a separate Harvest container for each poller defined in your harvest.yml file. Please note that these containers must be incorporated into your current infrastructure, which might include systems like Prometheus or Grafana.

        "},{"location":"install/harvest-containers/#setup-harvestyml","title":"Setup harvest.yml","text":"
        • Create a harvest.yml file with your cluster details, below is an example with annotated comments. Modify as needed for your scenario.

        This config is using the Prometheus exporter port_range feature, so you don't have to manage the Prometheus exporter port mappings for each poller.

        Exporters:\n  prometheus1:\n    exporter: Prometheus\n    addr: 0.0.0.0\n    port_range: 2000-2030  # <====== adjust to be greater than equal to the number of monitored clusters\n\nDefaults:\n  collectors:\n    - Zapi\n    - ZapiPerf\n    - EMS\n  use_insecure_tls: true   # <====== adjust as needed to enable/disable TLS checks \n  exporters:\n    - prometheus1\n\nPollers:\n  infinity:                # <====== add your cluster(s) here, they use the exporter defined three lines above\n    datacenter: DC-01\n    addr: 10.0.1.2\n    auth_style: basic_auth\n    username: user\n    password: 123#abc\n  # next cluster ....  \n
        "},{"location":"install/harvest-containers/#generate-a-docker-compose-for-your-pollers","title":"Generate a Docker compose for your Pollers","text":"
        • Generate a Docker compose file from your harvest.yml
        docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest \\\n  generate docker \\\n  --output harvest-compose.yml\n
        "},{"location":"install/harvest-containers/#start-everything","title":"Start everything","text":"

        Bring everything up

        docker compose -f harvest-compose.yml up -d --remove-orphans\n
        "},{"location":"install/harvest-containers/#manage-pollers","title":"Manage pollers","text":""},{"location":"install/harvest-containers/#how-do-i-add-a-new-poller","title":"How do I add a new poller?","text":"
        1. Add poller to harvest.yml
        2. Regenerate compose file by running harvest generate
        3. Run docker compose up, for example,
        docker compose -f harvest-compose.yml up -d --remove-orphans\n
        "},{"location":"install/harvest-containers/#stop-all-containers","title":"Stop all containers","text":"
        docker compose -f harvest-compose.yml down\n
        "},{"location":"install/harvest-containers/#upgrade-harvest","title":"Upgrade Harvest","text":"

        To upgrade Harvest:

        1. Retrieve the most recent version of the Harvest Docker image by executing the following command. This is needed since the new version may contain new templates, dashboards, or other files not included in the Docker image.

          docker pull ghcr.io/netapp/harvest\n

        2. Stop all containers

        3. Regenerate your harvest-compose.yml file by running harvest generate. By default, generate will use the latest tag. If you want to upgrade to a nightly build, see the twisty.

          I want to upgrade to a nightly build

          Tell the generate cmd to use a different tag like so:

          docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest:nightly \\\n  generate docker \\\n  --image ghcr.io/netapp/harvest:nightly \\\n  --output harvest-compose.yml\n
        4. Restart your containers using the following:

        docker compose -f harvest-compose.yml up -d --remove-orphans\n
        "},{"location":"install/k8/","title":"K8 Deployment","text":"

        The following steps are provided for reference purposes only. Depending on the specifics of your k8 configuration, you may need to modify the steps or files.

        "},{"location":"install/k8/#requirements","title":"Requirements","text":"
        • Kompose: v1.25 or higher
        "},{"location":"install/k8/#deployment","title":"Deployment","text":"
        • Local k8 Deployment
        • Cloud Deployment
        "},{"location":"install/k8/#local-k8-deployment","title":"Local k8 Deployment","text":"

        To run Harvest resources in Kubernetes, please execute the following commands:

        1. After adding your clusters to harvest.yml, generate harvest-compose.yml and prom-stack.yml.
        docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest \\\n  generate docker full \\\n  --output harvest-compose.yml\n
        example harvest.yml

        Tools:\nExporters:\n    prometheus1:\n        exporter: Prometheus\n        port_range: 12990-14000\nDefaults:\n    use_insecure_tls: true\n    collectors:\n      - Zapi\n      - ZapiPerf\n    exporters:\n      - prometheus1\nPollers:\n    u2:\n        datacenter: u2\n        addr: ADDRESS\n        username: USER\n        password: PASS\n

        harvest-compose.yml

        version: \"3.7\"\n\nservices:\n\n  u2:\n    image: ghcr.io/netapp/harvest:latest\n    container_name: poller-u2\n    restart: unless-stopped\n    ports:\n      - 12990:12990\n    command: '--poller u2 --promPort 12990 --config /opt/harvest.yml'\n    volumes:\n      - /Users/harvest/conf:/opt/harvest/conf\n      - /Users/harvest/cert:/opt/harvest/cert\n      - /Users/harvest/harvest.yml:/opt/harvest.yml\n    networks:\n      - backend\n

        2. Using kompose, convert harvest-compose.yml and prom-stack.yml into Kubernetes resources and save them as kub.yaml.
        kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n
        kub.yaml

        ---\napiVersion: v1\nkind: Service\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n    kompose.service.type: nodeport\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: grafana\n  name: grafana\nspec:\n  ports:\n    - name: \"3000\"\n      port: 3000\n      targetPort: 3000\n  selector:\n    io.kompose.service: grafana\n  type: NodePort\nstatus:\n  loadBalancer: {}\n\n---\napiVersion: v1\nkind: Service\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n    kompose.service.type: nodeport\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: prometheus\n  name: prometheus\nspec:\n  ports:\n    - name: \"9090\"\n      port: 9090\n      targetPort: 9090\n  selector:\n    io.kompose.service: prometheus\n  type: NodePort\nstatus:\n  loadBalancer: {}\n\n---\napiVersion: v1\nkind: Service\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: u2\n  name: u2\nspec:\n  ports:\n    - name: \"12990\"\n      port: 12990\n      targetPort: 12990\n  selector:\n    io.kompose.service: u2\nstatus:\n  loadBalancer: {}\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n    kompose.service.type: nodeport\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: grafana\n  name: grafana\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      io.kompose.service: grafana\n  strategy:\n    type: Recreate\n  template:\n    metadata:\n      annotations:\n        kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n        kompose.service.type: nodeport\n        kompose.version: 1.28.0 (HEAD)\n      creationTimestamp: null\n      labels:\n        io.kompose.network/harvest-backend: \"true\"\n        io.kompose.network/harvest-frontend: \"true\"\n        io.kompose.service: grafana\n    spec:\n      containers:\n        - image: grafana/grafana:8.3.4\n          name: grafana\n          ports:\n            - containerPort: 3000\n          resources: {}\n          volumeMounts:\n            - mountPath: /var/lib/grafana\n              name: grafana-data\n            - mountPath: /etc/grafana/provisioning\n              name: grafana-hostpath1\n      restartPolicy: Always\n      volumes:\n        - hostPath:\n            path: /Users/harvest\n          name: grafana-data\n        - hostPath:\n            path: /Users/harvest/grafana\n          name: grafana-hostpath1\nstatus: {}\n\n---\napiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  creationTimestamp: null\n  name: harvest-backend\nspec:\n  ingress:\n    - from:\n        - podSelector:\n            matchLabels:\n              io.kompose.network/harvest-backend: \"true\"\n  podSelector:\n    matchLabels:\n      io.kompose.network/harvest-backend: \"true\"\n\n---\napiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  creationTimestamp: null\n  name: harvest-frontend\nspec:\n  ingress:\n    - from:\n        - podSelector:\n     
       matchLabels:\n              io.kompose.network/harvest-frontend: \"true\"\n  podSelector:\n    matchLabels:\n      io.kompose.network/harvest-frontend: \"true\"\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n    kompose.service.type: nodeport\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: prometheus\n  name: prometheus\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      io.kompose.service: prometheus\n  strategy:\n    type: Recreate\n  template:\n    metadata:\n      annotations:\n        kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n        kompose.service.type: nodeport\n        kompose.version: 1.28.0 (HEAD)\n      creationTimestamp: null\n      labels:\n        io.kompose.network/harvest-backend: \"true\"\n        io.kompose.service: prometheus\n    spec:\n      containers:\n        - args:\n            - --config.file=/etc/prometheus/prometheus.yml\n            - --storage.tsdb.path=/prometheus\n            - --web.console.libraries=/usr/share/prometheus/console_libraries\n            - --web.console.templates=/usr/share/prometheus/consoles\n          image: prom/prometheus:v2.33.1\n          name: prometheus\n          ports:\n            - containerPort: 9090\n          resources: {}\n          volumeMounts:\n            - mountPath: /etc/prometheus\n              name: prometheus-hostpath0\n            - mountPath: /prometheus\n              name: prometheus-data\n      restartPolicy: Always\n      volumes:\n        - hostPath:\n            path: /Users/harvest/container/prometheus\n          name: prometheus-hostpath0\n        - hostPath:\n            path: /Users/harvest\n          name: prometheus-data\nstatus: {}\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: u2\n  name: u2\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      io.kompose.service: u2\n  strategy:\n    type: Recreate\n  template:\n    metadata:\n      annotations:\n        kompose.cmd: kompose convert --file harvest-compose.yml --file prom-stack.yml --out kub.yaml --volumes hostPath\n        kompose.version: 1.28.0 (HEAD)\n      creationTimestamp: null\n      labels:\n        io.kompose.network/harvest-backend: \"true\"\n        io.kompose.service: u2\n    spec:\n      containers:\n        - args:\n            - --poller\n            - u2\n            - --promPort\n            - \"12990\"\n            - --config\n            - /opt/harvest.yml\n          image: ghcr.io/netapp/harvest:latest\n          name: poller-u2\n          ports:\n            - containerPort: 12990\n          resources: {}\n          volumeMounts:\n            - mountPath: /opt/harvest/conf\n              name: u2-hostpath0\n            - mountPath: /opt/harvest/cert\n              name: u2-hostpath1\n            - mountPath: /opt/harvest.yml\n              name: u2-hostpath2\n      restartPolicy: Always\n      volumes:\n        - hostPath:\n            path: /Users/harvest/conf\n          name: u2-hostpath0\n        - hostPath:\n            path: /Users/harvest/cert\n          name: u2-hostpath1\n        - hostPath:\n        
    path: /Users/harvest/harvest.yml\n          name: u2-hostpath2\nstatus: {}\n

        3. Apply kub.yaml to k8.
        kubectl apply --filename kub.yaml\n
        4. List running pods.
        kubectl get pods\n
        pods

        NAME                          READY   STATUS    RESTARTS   AGE\nprometheus-666fc7b64d-xfkvk   1/1     Running   0          43m\ngrafana-7cd8bdc9c9-wmsxh      1/1     Running   0          43m\nu2-7dfb76b5f6-zbfm6           1/1     Running   0          43m\n
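
        To verify that a poller is collecting data, you can tail its logs. This sketch assumes the poller deployment is named u2, as in the example above:

        kubectl logs deployment/u2 --tail=20\n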

        "},{"location":"install/k8/#remove-all-harvest-resources-from-k8","title":"Remove all Harvest resources from k8","text":"

        kubectl delete --filename kub.yaml

        "},{"location":"install/k8/#helm-chart","title":"Helm Chart","text":"

        Generate helm charts

        kompose convert --file harvest-compose.yml --file prom-stack.yml --chart --volumes hostPath --out harvestchart\n
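
        Once the chart is generated, it can be installed with Helm. The release name harvest below is only an example:

        helm install harvest ./harvestchart\nhelm list\n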
        "},{"location":"install/k8/#cloud-deployment","title":"Cloud Deployment","text":"

        We will use configMap to generate Kubernetes resources for deploying Harvest pollers in a cloud environment. Please note the following assumptions for the steps below:

        • The steps provided are solely for the deployment of Harvest poller pods. Separate configurations are required to set up Prometheus and Grafana.
        • Networking between Harvest and Prometheus must be configured; this can be accomplished by adding the network configuration to harvest-compose.yml.

        1. After configuring the clusters in harvest.yml, generate harvest-compose.yml. We also want to remove the conf directory from the harvest-compose.yml file; otherwise kompose will create an empty configMap for it. We'll remove the conf directory by commenting out that line using sed.

        docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest \\\n  generate docker full \\\n  --output harvest-compose.yml\n\nsed -i '/\\/conf/s/^/#/g' harvest-compose.yml\n
        harvest-compose.yml

        version: \"3.7\"\n\nservices:\n\n  u2:\n    image: ghcr.io/netapp/harvest:latest\n    container_name: poller-u2\n    restart: unless-stopped\n    ports:\n      - 12990:12990\n    command: '--poller u2 --promPort 12990 --config /opt/harvest.yml'\n    volumes:\n      #      - /Users/harvest/conf:/opt/harvest/conf\n      - /Users/harvest/cert:/opt/harvest/cert\n      - /Users/harvest/harvest.yml:/opt/harvest.yml\n

        2. Using kompose, convert harvest-compose.yml into Kubernetes resources and save them as kub.yaml.
        kompose convert --file harvest-compose.yml --volumes configMap -o kub.yaml\n
        kub.yaml

        ---\napiVersion: v1\nkind: Service\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --volumes configMap -o kub.yaml\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: u2\n  name: u2\nspec:\n  ports:\n    - name: \"12990\"\n      port: 12990\n      targetPort: 12990\n  selector:\n    io.kompose.service: u2\nstatus:\n  loadBalancer: {}\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  annotations:\n    kompose.cmd: kompose convert --file harvest-compose.yml --volumes configMap -o kub.yaml\n    kompose.version: 1.28.0 (HEAD)\n  creationTimestamp: null\n  labels:\n    io.kompose.service: u2\n  name: u2\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      io.kompose.service: u2\n  strategy:\n    type: Recreate\n  template:\n    metadata:\n      annotations:\n        kompose.cmd: kompose convert --file harvest-compose.yml --volumes configMap -o kub.yaml\n        kompose.version: 1.28.0 (HEAD)\n      creationTimestamp: null\n      labels:\n        io.kompose.network/harvest-default: \"true\"\n        io.kompose.service: u2\n    spec:\n      containers:\n        - args:\n            - --poller\n            - u2\n            - --promPort\n            - \"12990\"\n            - --config\n            - /opt/harvest.yml\n          image: ghcr.io/netapp/harvest:latest\n          name: poller-u2\n          ports:\n            - containerPort: 12990\n          resources: {}\n          volumeMounts:\n            - mountPath: /opt/harvest/cert\n              name: u2-cm0\n            - mountPath: /opt/harvest.yml\n              name: u2-cm1\n              subPath: harvest.yml\n      restartPolicy: Always\n      volumes:\n        - configMap:\n            name: u2-cm0\n          name: u2-cm0\n        - configMap:\n            items:\n              - key: harvest.yml\n                path: harvest.yml\n            name: u2-cm1\n          name: u2-cm1\nstatus: {}\n\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  creationTimestamp: null\n  labels:\n    io.kompose.service: u2\n  name: u2-cm0\n\n---\napiVersion: v1\ndata:\n  harvest.yml: |+\n    Tools:\n    Exporters:\n        prometheus1:\n            exporter: Prometheus\n            port_range: 12990-14000\n            add_meta_tags: false\n    Defaults:\n        use_insecure_tls: true\n        prefer_zapi: true\n    Pollers:\n\n        u2:\n            datacenter: u2\n            addr: ADDRESS\n            username: USER\n            password: PASS\n            collectors:\n                - Rest\n            exporters:\n                - prometheus1\n\nkind: ConfigMap\nmetadata:\n  annotations:\n    use-subpath: \"true\"\n  creationTimestamp: null\n  labels:\n    io.kompose.service: u2\n  name: u2-cm1\n\n---\napiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  creationTimestamp: null\n  name: harvest-default\nspec:\n  ingress:\n    - from:\n        - podSelector:\n            matchLabels:\n              io.kompose.network/harvest-default: \"true\"\n  podSelector:\n    matchLabels:\n      io.kompose.network/harvest-default: \"true\"\n

        3. Apply kub.yaml to k8.
        kubectl apply --filename kub.yaml\n
        4. List running pods.
        kubectl get pods\n
        pods

        NAME                  READY   STATUS    RESTARTS   AGE\nu2-6864cc7dbc-v6444   1/1     Running   0          6m27s\n
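
        Because Prometheus and Grafana are deployed separately in this scenario, a quick way to confirm the poller is exporting metrics is to port-forward to its service and curl the metrics endpoint. The service name u2 and port 12990 below come from the example above:

        kubectl port-forward svc/u2 12990:12990 &\nsleep 2\ncurl -s http://localhost:12990/metrics | head\n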

        "},{"location":"install/k8/#remove-all-harvest-resources-from-k8_1","title":"Remove all Harvest resources from k8","text":"

        kubectl delete --filename kub.yaml

        "},{"location":"install/k8/#helm-chart_1","title":"Helm Chart","text":"

        Generate helm charts

        kompose convert --file harvest-compose.yml --chart --volumes configMap --out harvestchart\n
        "},{"location":"install/native/","title":"Native","text":""},{"location":"install/native/#installation","title":"Installation","text":"

        Visit the Releases page and copy the tar.gz link for the latest release. For example, to download the 23.08.0 release:

        VERSION=23.08.0\nwget https://github.com/NetApp/harvest/releases/download/v${VERSION}/harvest-${VERSION}-1_linux_amd64.tar.gz\ntar -xvf harvest-${VERSION}-1_linux_amd64.tar.gz\ncd harvest-${VERSION}-1_linux_amd64\n\n# Run Harvest with the default unix localhost collector\nbin/harvest start\n
        With curl

        If you don't have wget installed, you can use curl like so:

        curl -L -O https://github.com/NetApp/harvest/releases/download/v22.08.0/harvest-22.08.0-1_linux_amd64.tar.gz\n
        "},{"location":"install/native/#upgrade","title":"Upgrade","text":"

        Stop Harvest:

        cd <existing harvest directory>\nbin/harvest stop\n

        Verify that all pollers have stopped:

        bin/harvest status\nor\npgrep --full '\\-\\-poller'  # should return nothing if all pollers are stopped\n

        Download the latest release and extract it to a new directory. For example, to upgrade to the 23.11.0 release:

        VERSION=23.11.0\nwget https://github.com/NetApp/harvest/releases/download/v${VERSION}/harvest-${VERSION}-1_linux_amd64.tar.gz\ntar -xvf harvest-${VERSION}-1_linux_amd64.tar.gz\ncd harvest-${VERSION}-1_linux_amd64\n

        Copy your old harvest.yml into the new install directory:

        cp /path/to/old/harvest/harvest.yml /path/to/new/harvest/harvest.yml\n
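
        Assuming you want to start the new version right away, start Harvest from the new directory and confirm the pollers come back up (both commands are shown earlier on this page):

        cd /path/to/new/harvest\nbin/harvest start\nbin/harvest status\n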

        After upgrading, re-import all dashboards (either with the bin/harvest grafana import cli or via the Grafana UI) to pick up any new dashboard enhancements. For more details, see the dashboards documentation.

        It's best to run Harvest as a non-root user. Make sure the user running Harvest can write to /var/log/harvest/ or tell Harvest to write the logs somewhere else with the HARVEST_LOGS environment variable.
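
        For example, to write logs under your home directory instead of /var/log/harvest (the path below is only an illustration), export HARVEST_LOGS before starting Harvest:

        mkdir -p \"$HOME/harvest-logs\"\nexport HARVEST_LOGS=\"$HOME/harvest-logs\"\nbin/harvest start\n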

        If something goes wrong, examine the log files in /var/log/harvest, check out the troubleshooting section on the wiki, or jump onto Discord and ask for help.

        "},{"location":"install/overview/","title":"Overview","text":"

        Get up and running with Harvest on your preferred platform. We provide pre-compiled Linux binaries, RPMs, and Debs, as well as prebuilt container images for both nightly and stable releases.

        • Binaries for Linux
        • RPM and Debs
        • Containers
        "},{"location":"install/overview/#nabox","title":"Nabox","text":"

        Instructions on how to install Harvest via NAbox.

        "},{"location":"install/overview/#source","title":"Source","text":"

        To build Harvest from source code, follow these steps.

        1. git clone https://github.com/NetApp/harvest.git
        2. cd harvest
        3. Check the version of Go required in the go.mod file
        4. Ensure you have a working Go environment at that version or newer. Go installs can be found here.
        5. make build (if you want to run Harvest from a Mac use GOOS=darwin make build)
        6. bin/harvest version
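
        For convenience, here are the same steps as a single shell session, assuming a suitable Go toolchain is already installed:

        git clone https://github.com/NetApp/harvest.git\ncd harvest\nmake build\nbin/harvest version\n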

        Check out the Makefile for other targets of interest.

        "},{"location":"install/package-managers/","title":"Package Managers","text":""},{"location":"install/package-managers/#redhat","title":"Redhat","text":"

        Installation and upgrade of the Harvest package may require root or administrator privileges

        "},{"location":"install/package-managers/#installation","title":"Installation","text":"

        Download the latest rpm of Harvest from the releases tab and install with yum.

        sudo yum install harvest.XXX.rpm\n
        "},{"location":"install/package-managers/#upgrade","title":"Upgrade","text":"

        Download the latest rpm of Harvest from the releases tab and upgrade with yum.

        sudo yum upgrade harvest.XXX.rpm\n

        Once the installation or upgrade has finished, edit the harvest.yml configuration file located in /opt/harvest/harvest.yml

        After editing /opt/harvest/harvest.yml, manage Harvest with systemctl start|stop|restart harvest.
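
        For example, after editing the configuration you might restart the service and check that it is healthy; these are standard systemd commands, shown here for convenience:

        sudo systemctl restart harvest\nsystemctl status harvest\njournalctl -u harvest --since \"10 minutes ago\"\n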

        After upgrading, re-import all dashboards (either with the bin/harvest grafana import cli or via the Grafana UI) to pick up any new dashboard enhancements. For more details, see the dashboards documentation.

        To ensure that you don't run into permission issues, make sure you manage Harvest using systemctl instead of running the harvest binary directly.

        Changes install makes
        • Directories /var/log/harvest/ and /var/log/run/ are created
        • A harvest user and group are created and the installed files are chowned to harvest
        • Systemd /etc/systemd/system/harvest.service file is created and enabled
        "},{"location":"install/package-managers/#debian","title":"Debian","text":"

        Installation and upgrade of the Harvest package may require root or administrator privileges

        "},{"location":"install/package-managers/#installation_1","title":"Installation","text":"

        Download the latest deb of Harvest from the releases tab and install with apt.

        sudo apt install ./harvest-<RELEASE>.amd64.deb\n
        "},{"location":"install/package-managers/#upgrade_1","title":"Upgrade","text":"

        Download the latest deb of Harvest from the releases tab and upgrade with apt.

        sudo apt install --only-upgrade ./harvest-<RELEASE>.amd64.deb\n

        Once the installation or upgrade has finished, edit the harvest.yml configuration file located in /opt/harvest/harvest.yml

        After editing /opt/harvest/harvest.yml, manage Harvest with systemctl start|stop|restart harvest.

        After upgrading, re-import all dashboards (either with the bin/harvest grafana import cli or via the Grafana UI) to pick up any new dashboard enhancements. For more details, see the dashboards documentation.

        To ensure that you don't run into permission issues, make sure you manage Harvest using systemctl instead of running the harvest binary directly.

        Changes install makes
        • Directories /var/log/harvest/ and /var/log/run/ are created
        • A harvest user and group are created and the installed files are chowned to harvest
        • Systemd /etc/systemd/system/harvest.service file is created and enabled
        "},{"location":"install/podman/","title":"Containerized Harvest on Linux using Rootless Podman","text":"

        RHEL 8 ships with Podman instead of Docker. There are two ways to run containers with Podman: rootless or with root. Both setups are outlined below. The Podman ecosystem is changing rapidly, so the shelf life of these instructions may be short. Make sure you have at least the versions of the tools listed below.

        If you don't want to bother with Podman, you can also install Docker on RHEL 8 and use it to run Harvest per normal.

        "},{"location":"install/podman/#setup","title":"Setup","text":"

        Make sure your OS is up-to-date with yum update. Podman's dependencies are updated frequently.

        sudo yum remove docker-ce\nsudo yum module enable -y container-tools:rhel8\nsudo yum module install -y container-tools:rhel8\nsudo yum install podman podman-docker podman-plugins\n

        We also need to install Docker Compose since Podman uses it for compose workflows. Install docker-compose like this:

        VERSION=1.29.2\nsudo curl -L \"https://github.com/docker/compose/releases/download/$VERSION/docker-compose-$(uname -s)-$(uname -m)\" -o /usr/local/bin/docker-compose\nsudo chmod +x /usr/local/bin/docker-compose\nsudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose\n

        After all the packages are installed, start the Podman systemd socket-activated service:

        sudo systemctl start podman.socket\n
        "},{"location":"install/podman/#containerized-harvest-on-linux-using-rootful-podman","title":"Containerized Harvest on Linux using Rootful Podman","text":"

        Make sure you're able to curl the endpoint.

        sudo curl -H \"Content-Type: application/json\" --unix-socket /var/run/docker.sock http://localhost/_ping\n

        If the sudo curl does not print OK\u23ce, troubleshoot before continuing.

        Proceed to Running Harvest

        "},{"location":"install/podman/#containerized-harvest-on-linux-using-rootless-podman_1","title":"Containerized Harvest on Linux using Rootless Podman","text":"

        To run Podman rootless, we'll create a non-root user named harvest to run Harvest.

        # as root or sudo\nusermod --append --groups wheel harvest\n

        Log in as the harvest user, set up the podman.socket, and make sure the curl below works. su or sudo aren't sufficient; you need to ssh into the machine as the harvest user or use machinectl login. See sudo-rootless-podman for details.

        # these must be run as the harvest user\nsystemctl --user enable podman.socket\nsystemctl --user start podman.socket\nsystemctl --user status podman.socket\nexport DOCKER_HOST=unix:///run/user/$UID/podman/podman.sock\n\nsudo curl -H \"Content-Type: application/json\" --unix-socket /var/run/docker.sock http://localhost/_ping\n

        If the sudo curl does not print OK\u23ce, troubleshoot before continuing.

        Run podman info and make sure runRoot points to /run/user/$UID/containers (see below). If it doesn't, you'll probably run into problems when restarting the machine. See errors after rebooting.

        podman info | grep runRoot\n  runRoot: /run/user/1001/containers\n
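
        Note that the DOCKER_HOST export above only lasts for the current shell session. If you want it set on every login for the harvest user, you could append it to the user's shell profile (assuming bash):

        echo 'export DOCKER_HOST=unix:///run/user/$UID/podman/podman.sock' >> ~/.bash_profile\n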
        "},{"location":"install/podman/#running-harvest","title":"Running Harvest","text":"

        By default, Cockpit runs on port 9090, the same port as Prometheus. We'll change Prometheus's host port to 9091 so we can run both Cockpit and Prometheus. The --promPort 9091 flag in the generate command below does that.

        With these changes, the standard Harvest compose instructions can be followed as normal. In summary:

        1. Add the clusters, exporters, etc. to your harvest.yml file
        2. Generate a compose file from your harvest.yml by running

          docker run --rm \\\n  --env UID=$(id -u) --env GID=$(id -g) \\\n  --entrypoint \"bin/harvest\" \\\n  --volume \"$(pwd):/opt/temp\" \\\n  --volume \"$(pwd)/harvest.yml:/opt/harvest/harvest.yml\" \\\n  ghcr.io/netapp/harvest \\\n  generate docker full \\\n  --output harvest-compose.yml \\\n  --promPort 9091\n
        3. Bring everything up

          docker-compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans\n

        After starting the containers, you can view them with podman ps -a or using Cockpit https://host-ip:9090/podman.

        podman ps -a\nCONTAINER ID  IMAGE                                   COMMAND               CREATED        STATUS            PORTS                     NAMES\n45fd00307d0a  ghcr.io/netapp/harvest:latest           --poller unix --p...  5 seconds ago  Up 5 seconds ago  0.0.0.0:12990->12990/tcp  poller_unix_v21.11.0\nd40585bb903c  localhost/prom/prometheus:latest        --config.file=/et...  5 seconds ago  Up 5 seconds ago  0.0.0.0:9091->9090/tcp    prometheus\n17a2784bc282  localhost/grafana/grafana:latest                              4 seconds ago  Up 5 seconds ago  0.0.0.0:3000->3000/tcp    grafana\n
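
        You can also confirm Prometheus is reachable on its remapped host port; /-/healthy is Prometheus's standard health endpoint, and 9091 is the host port chosen earlier:

        curl -s http://localhost:9091/-/healthy\n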
        "},{"location":"install/podman/#troubleshooting","title":"Troubleshooting","text":"

        Check Podman's troubleshooting docs

        "},{"location":"install/podman/#nothing-works","title":"Nothing works","text":"

        Make sure the DOCKER_HOST env variable is set and that this curl works.

        sudo curl -H \"Content-Type: application/json\" --unix-socket /var/run/docker.sock http://localhost/_ping\n

        Make sure your containers can talk to each other.

        ping prometheus\nPING prometheus (10.88.2.3): 56 data bytes\n64 bytes from 10.88.2.3: seq=0 ttl=42 time=0.059 ms\n64 bytes from 10.88.2.3: seq=1 ttl=42 time=0.065 ms\n
        "},{"location":"install/podman/#errors-after-rebooting","title":"Errors after rebooting","text":"

        After restarting the machine, I see errors like these when running podman ps.

        podman ps -a\nERRO[0000] error joining network namespace for container 424df6c: error retrieving network namespace at /run/user/1001/netns/cni-5fb97adc-b6ef-17e8-565b-0481b311ba09: failed to Statfs \"/run/user/1001/netns/cni-5fb97adc-b6ef-17e8-565b-0481b311ba09\": no such file or directory\n

        Run podman info and make sure runRoot points to /run/user/$UID/containers (see below). If it instead points to /tmp/podman-run-$UID you will likely have problems when restarting the machine. Typically, this happens because you used su to become the harvest user or ran podman as root. You can fix this by logging in as the harvest user and running podman system reset.

        podman info | grep runRoot\n  runRoot: /run/user/1001/containers\n
        "},{"location":"install/podman/#linger-errors","title":"Linger errors","text":"

        When you log out, systemd may remove some temporary files and tear down Podman's rootless network. The workaround is to run the following as the harvest user. Details here

        loginctl enable-linger\n
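
        To confirm that lingering is enabled for the harvest user, you can inspect it with loginctl:

        loginctl show-user harvest | grep Linger\n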
        "},{"location":"install/podman/#versions","title":"Versions","text":"

        The following versions were used to validate this workflow.

        podman version\n\nVersion:      3.2.3\nAPI Version:  3.2.3\nGo Version:   go1.15.7\nBuilt:        Thu Jul 29 11:02:43 2021\nOS/Arch:      linux/amd64\n\ndocker-compose -v\ndocker-compose version 1.29.2, build 5becea4c\n\ncat /etc/redhat-release\nRed Hat Enterprise Linux release 8.4 (Ootpa)\n
        "},{"location":"install/podman/#references","title":"References","text":"
        • https://github.com/containers/podman
        • https://www.redhat.com/sysadmin/sudo-rootless-podman
        • https://www.redhat.com/sysadmin/podman-docker-compose
        • https://fedoramagazine.org/use-docker-compose-with-podman-to-orchestrate-containers-on-fedora/
        • https://podman.io/getting-started/network.html mentions the need for podman-plugins, otherwise rootless containers running in separate containers cannot see each other
        • Troubleshoot Podman
        "},{"location":"resources/ems-alert-runbook/","title":"EMS Alert Runbook","text":"

        This document describes each ONTAP event management system (EMS) event that Harvest collects, along with remediation steps.

        "},{"location":"resources/ems-alert-runbook/#aws-credentials-not-initialized","title":"AWS Credentials Not Initialized","text":"

        Impact: Availability

        EMS Event: cloud.aws.iamNotInitialized

        This event occurs when a module attempts to access Amazon Web Services (AWS) Identity and Access Management (IAM) role-based credentials from the cloud credentials thread before they are initialized.

        Remediation

        Wait for the cloud credential thread, as well as the system, to complete initialization.

        "},{"location":"resources/ems-alert-runbook/#antivirus-server-busy","title":"Antivirus Server Busy","text":"

        Impact: Availability

        EMS Event: Nblade.vscanConnBackPressure

        The antivirus server is too busy to accept any new scan requests.

        Remediation

        If this message occurs frequently, ensure that there are enough antivirus servers to handle the virus scan load generated by the SVM.

        "},{"location":"resources/ems-alert-runbook/#cloud-tier-unreachable","title":"Cloud Tier Unreachable","text":"

        Impact: Availability

        EMS Event: object.store.unavailable

        A storage node cannot connect to Cloud Tier object store API. Some data will be inaccessible.

        Remediation

        If you use on-premises products, perform the following corrective actions:

        1. Verify that your intercluster LIF is online and functional by using the \"network interface show\" command.
        2. Check the network connectivity to the object store server by using the \"ping\" command over the destination node intercluster LIF.
        3. Ensure the following: a. The configuration of your object store has not changed. b. The login and connectivity information is still valid. Contact NetApp technical support if the issue persists.

        If you use Cloud Volumes ONTAP, perform the following corrective actions:

        1. Ensure that the configuration of your object store has not changed.
        2. Ensure that the login and connectivity information is still valid. Contact NetApp technical support if the issue persists.
        "},{"location":"resources/ems-alert-runbook/#disk-out-of-service","title":"Disk Out of Service","text":"

        Impact: Availability

        EMS Event: disk.outOfService

        This event occurs when a disk is removed from service because it has been marked failed, is being sanitized, or has entered the Maintenance Center.

        "},{"location":"resources/ems-alert-runbook/#disk-shelf-power-supply-discovered","title":"Disk Shelf Power Supply Discovered","text":"

        Impact: Configuration

        EMS Event: diskShelf.psu.added

        This message occurs when a power supply unit is added to the disk shelf.

        "},{"location":"resources/ems-alert-runbook/#disk-shelves-power-supply-removed","title":"Disk Shelves Power Supply Removed","text":"

        Impact: Availability

        EMS Event: diskShelf.psu.removed

        This message occurs when a power supply unit is removed from the disk shelf.

        "},{"location":"resources/ems-alert-runbook/#fc-target-port-commands-exceeded","title":"FC Target Port Commands Exceeded","text":"

        Impact: Availability

        EMS Event: scsitarget.fct.port.full

        The number of outstanding commands on the physical FC target port exceeds the supported limit. The port does not have sufficient buffers for the outstanding commands. It is overrun or the fan-in is too steep because too many initiator I/Os are using it.

        Remediation

        Perform the following corrective actions:

        1. Evaluate the host fan-in on the port, and perform one of the following actions: a. Reduce the number of hosts that log in to this port. b. Reduce the number of LUNs accessed by the hosts that log in to this port. c. Reduce the host command queue depth.
        2. Monitor the \"queue_full\" counter on the \"fcp_port\" CM object, and ensure that it does not increase. For example: statistics show -object fcp_port -counter queue_full -instance port.portname -raw
        3. Monitor the threshold counter and ensure that it does not increase. For example: statistics show -object fcp_port -counter threshold_full -instance port.portname -raw
        "},{"location":"resources/ems-alert-runbook/#fabricpool-mirror-replication-resync-completed","title":"FabricPool Mirror Replication Resync Completed","text":"

        Impact: Capacity

        EMS Event: wafl.ca.resync.complete

        This message occurs when Data ONTAP(R) completes the resync process from the primary object store to the mirror object store for a mirrored FabricPool aggregate.

        "},{"location":"resources/ems-alert-runbook/#fabricpool-space-usage-limit-nearly-reached","title":"FabricPool Space Usage Limit Nearly Reached","text":"

        Impact: Capacity

        EMS Event: fabricpool.nearly.full

        The total cluster-wide FabricPool space usage of object stores from capacity-licensed providers has nearly reached the licensed limit.

        Remediation

        Perform the following corrective actions:

        1. Check the percentage of the licensed capacity used by each FabricPool storage tier by using the \"storage aggregate object-store show-space\" command.
        2. Delete Snapshot copies from volumes with the tiering policy \"snapshot\" or \"backup\" by using the \"volume snapshot delete\" command to clear up space.
        3. Install a new license on the cluster to increase the licensed capacity.
        "},{"location":"resources/ems-alert-runbook/#fabricpool-space-usage-limit-reached","title":"FabricPool Space Usage Limit Reached","text":"

        Impact: Capacity

        EMS Event: fabricpool.full

        The total cluster-wide FabricPool space usage of object stores from capacity-licensed providers has reached the license limit.

        Remediation

        Perform the following corrective actions:

        1. Check the percentage of the licensed capacity used by each FabricPool storage tier by using the \"storage aggregate object-store show-space\" command.
        2. Delete Snapshot copies from volumes with the tiering policy \"snapshot\" or \"backup\" by using the \"volume snapshot delete\" command to clear up space.
        3. Install a new license on the cluster to increase the licensed capacity.
        "},{"location":"resources/ems-alert-runbook/#fanout-snapmirror-relationship-common-snapshot-deleted","title":"Fanout SnapMirror Relationship Common Snapshot Deleted","text":"

        Impact: Protection

        EMS Event: sms.fanout.comm.snap.deleted

        This message occurs when an older Snapshot(tm) copy is deleted as part of a SnapMirror\u00ae Synchronous resynchronize or update (common Snapshot copy) operation, which could lead to a \"no common Snapshot scenario\" between the synchronous and asynchronous disaster recovery (DR) copies that share the same source volume. If there is no common Snapshot copy between the synchronous and asynchronous DR copies, then a re-baseline will need to be performed during a disaster recovery.

        Remediation

        You can ignore this message if there is no asynchronous relationship configured for the synchronous source volume. If there is an asynchronous relationship configured, then update the asynchronous relationship by using the \"snapmirror update\" command. The SnapMirror update operation will transfer the snapshots that will act as common snapshots between the synchronous and asynchronous destinations.

        "},{"location":"resources/ems-alert-runbook/#giveback-of-storage-pool-failed","title":"Giveback of Storage Pool Failed","text":"

        Impact: Availability

        EMS Event: gb.netra.ca.check.failed

        This event occurs during the migration of a storage pool (aggregate) as part of a storage failover (SFO) giveback, when the destination node cannot reach the object stores.

        Remediation

        Perform the following corrective actions:

        1. Verify that your intercluster LIF is online and functional by using the \"network interface show\" command.
        2. Check network connectivity to the object store server by using the \"ping\" command over the destination node intercluster LIF.
        3. Verify that the configuration of your object store has not changed and that login and connectivity information is still accurate by using the \"aggregate object-store config show\" command.

        Alternatively, you can override the error by specifying false for the \"require-partner-waiting\" parameter of the giveback command.

        Contact NetApp technical support for more information or assistance.

        "},{"location":"resources/ems-alert-runbook/#ha-interconnect-down","title":"HA Interconnect Down","text":"

        Impact: Availability

        EMS Event: callhome.hainterconnect.down

        The high-availability (HA) interconnect is down. Risk of service outage when failover is not available.

        Remediation

        Corrective actions depend on the number and type of HA interconnect links supported by the platform, as well as the reason why the interconnect is down.

        • If the links are down:
          • Verify that both controllers in the HA pair are operational.
          • For externally connected links, make sure that the interconnect cables are connected properly and that the small form-factor pluggables (SFPs), if applicable, are seated properly on both controllers.
          • For internally connected links, disable and re-enable the links, one after the other, by using the \"ic link off\" and \"ic link on\" commands.
        • If links are disabled, enable the links by using the \"ic link on\" command.
        • If a peer is not connected, disable and re-enable the links, one after the other, by using the \"ic link off\" and \"ic link on\" commands.

        Contact NetApp technical support if the issue persists.

        "},{"location":"resources/ems-alert-runbook/#lun-destroyed","title":"LUN Destroyed","text":"

        Impact: Availability

        EMS Event: LUN.destroy

        This event occurs when a LUN is destroyed.

        "},{"location":"resources/ems-alert-runbook/#lun-offline","title":"LUN Offline","text":"

        Impact: Availability

        EMS Event: LUN.offline

        This message occurs when a LUN is brought offline manually.

        Remediation

        Bring the LUN back online.

        "},{"location":"resources/ems-alert-runbook/#main-unit-fan-failed","title":"Main Unit Fan Failed","text":"

        Impact: Availability

        EMS Event: monitor.fan.failed

        One or more main unit fans have failed. The system remains operational.

        However, if the condition persists for too long, the overtemperature might trigger an automatic shutdown.

        Remediation

        Reseat the failed fans. If the error persists, replace them.

        "},{"location":"resources/ems-alert-runbook/#main-unit-fan-in-warning-state","title":"Main Unit Fan in Warning State","text":"

        Impact: Availability

        EMS Event: monitor.fan.warning

        This event occurs when one or more main unit fans are in a warning state.

        Remediation

        Replace the indicated fans to avoid overheating.

        "},{"location":"resources/ems-alert-runbook/#max-sessions-per-user-exceeded","title":"Max Sessions Per User Exceeded","text":"

        Impact: Availability

        EMS Event: Nblade.cifsMaxSessPerUsrConn

        You have exceeded the maximum number of sessions allowed per user over a TCP connection. Any request to establish a session will be denied until some sessions are released.

        Remediation

        Perform the following corrective actions:

        1. Inspect all the applications that run on the client, and terminate any that are not operating properly.
        2. Reboot the client.
        3. Check if the issue is caused by a new or existing application: a. If the application is new, set a higher threshold for the client by using the \"cifs option modify -max-opens-same-file-per-tree\" command. In some cases, clients operate as expected, but require a higher threshold. You should have advanced privilege to set a higher threshold for the client. b. If the issue is caused by an existing application, there might be an issue with the client. Contact NetApp technical support for more information or assistance.
        "},{"location":"resources/ems-alert-runbook/#max-times-open-per-file-exceeded","title":"Max Times Open Per File Exceeded","text":"

        Impact: Availability

        EMS Event: Nblade.cifsMaxOpenSameFile

        You have exceeded the maximum number of times that you can open the file over a TCP connection. Any request to open this file will be denied until you close some open instances of the file. This typically indicates abnormal application behavior.

        Remediation

        Perform the following corrective actions:

        1. Inspect the applications that run on the client using this TCP connection. The client might be operating incorrectly because of the application running on it.
        2. Reboot the client.
        3. Check if the issue is caused by a new or existing application: a. If the application is new, set a higher threshold for the client by using the \"cifs option modify -max-opens-same-file-per-tree\" command. In some cases, clients operate as expected, but require a higher threshold. You should have advanced privilege to set a higher threshold for the client. b. If the issue is caused by an existing application, there might be an issue with the client. Contact NetApp technical support for more information or assistance.
        "},{"location":"resources/ems-alert-runbook/#metrocluster-automatic-unplanned-switchover-disabled","title":"MetroCluster Automatic Unplanned Switchover Disabled","text":"

        Impact: Availability

        EMS Event: mcc.config.auso.stDisabled

        This message occurs when automatic unplanned switchover capability is disabled.

        Remediation

        Run the \"metrocluster modify -node-name -automatic-switchover-onfailure true\" command for each node in the cluster to enable automatic switchover."},{"location":"resources/ems-alert-runbook/#metrocluster-monitoring","title":"MetroCluster Monitoring","text":"

        Impact: Availability

        EMS Event: hm.alert.raised

        Aggregate was left behind during switchback.

        Remediation

        1. Check the aggregate state by using the command \"aggr show\".
        2. If the aggregate is online, return it to its original owner by using the command \"metrocluster switchback\".

        "},{"location":"resources/ems-alert-runbook/#nfsv4-store-pool-exhausted","title":"NFSv4 Store Pool Exhausted","text":"

        Impact: Availability

        EMS Event: Nblade.nfsV4PoolExhaust

        A NFSv4 store pool has been exhausted.

        Remediation

        If the NFS server is unresponsive for more than 10 minutes after this event, contact NetApp technical support.

        "},{"location":"resources/ems-alert-runbook/#nvme-namespace-destroyed","title":"NVMe Namespace Destroyed","text":"

        Impact: Availability

        EMS Event: NVMeNS.destroy

        This event occurs when an NVMe namespace is destroyed.

        "},{"location":"resources/ems-alert-runbook/#nvme-namespace-offline","title":"NVMe Namespace Offline","text":"

        Impact: Availability

        EMS Event: NVMeNS.offline

        This event occurs when an NVMe namespace is brought offline manually.

        "},{"location":"resources/ems-alert-runbook/#nvme-namespace-online","title":"NVMe Namespace Online","text":"

        Impact: Availability

        EMS Event: NVMeNS.online

        This event occurs when an NVMe namespace is brought online manually.

        "},{"location":"resources/ems-alert-runbook/#nvme-of-license-grace-period-active","title":"NVMe-oF License Grace Period Active","text":"

        Impact: Availability

        EMS Event: nvmf.graceperiod.active

        This event occurs on a daily basis when the NVMe over Fabrics (NVMe-oF) protocol is in use and the grace period of the license is active. The NVMe-oF functionality requires a license after the license grace period expires. NVMe-oF functionality is disabled when the license grace period is over.

        Remediation

        Contact your sales representative to obtain an NVMe-oF license, and add it to the cluster, or remove all instances of NVMe-oF configuration from the cluster.

        "},{"location":"resources/ems-alert-runbook/#nvme-of-license-grace-period-expired","title":"NVMe-oF License Grace Period Expired","text":"

        Impact: Availability

        EMS Event: nvmf.graceperiod.expired

        The NVMe over Fabrics (NVMe-oF) license grace period is over and the NVMe-oF functionality is disabled.

        Remediation

        Contact your sales representative to obtain an NVMe-oF license, and add it to the cluster.

        "},{"location":"resources/ems-alert-runbook/#nvme-of-license-grace-period-start","title":"NVMe-oF License Grace Period Start","text":"

        Impact: Availability

        EMS Event: nvmf.graceperiod.start

        The NVMe over Fabrics (NVMe-oF) configuration was detected during the upgrade to ONTAP 9.5 software. NVMe-oF functionality requires a license after the license grace period expires.

        Remediation

        Contact your sales representative to obtain an NVMe-oF license, and add it to the cluster.

        "},{"location":"resources/ems-alert-runbook/#nvram-battery-low","title":"NVRAM Battery Low","text":"

        Impact: Availability

        EMS Event: callhome.battery.low

        The NVRAM battery capacity is critically low. There might be a potential data loss if the battery runs out of power.

        Your system generates and transmits an AutoSupport or \"call home\" message to NetApp technical support and the configured destinations if it is configured to do so. The successful delivery of an AutoSupport message significantly improves problem determination and resolution.

        Remediation

        Perform the following corrective actions:

        1. View the battery's current status, capacity, and charging state by using the \"system node environment sensors show\" command.
        2. If the battery was replaced recently or the system was non-operational for an extended period of time, monitor the battery to verify that it is charging properly.
        3. Contact NetApp technical support if the battery runtime continues to decrease below critical levels, and the storage system shuts down automatically.
        "},{"location":"resources/ems-alert-runbook/#netbios-name-conflict","title":"NetBIOS Name Conflict","text":"

        Impact: Availability

        EMS Event: Nblade.cifsNbNameConflict

        The NetBIOS Name Service has received a negative response to a name registration request, from a remote machine. This is typically caused by a conflict in the NetBIOS name or an alias. As a result, clients might not be able to access data or connect to the right data-serving node in the cluster.

        Remediation

        Perform any one of the following corrective actions:

        • If there is a conflict in the NetBIOS name or an alias, perform one of the following:
          • Delete the duplicate NetBIOS alias by using the \"vserver cifs delete -aliases alias -vserver vserver\" command.
          • Rename a NetBIOS alias by deleting the duplicate name and adding an alias with a new name by using the \"vserver cifs create -aliases alias -vserver vserver\" command.
        • If there are no aliases configured and there is a conflict in the NetBIOS name, then rename the CIFS server by using the \"vserver cifs delete -vserver vserver\" and \"vserver cifs create -cifs-server netbiosname\" commands. NOTE: Deleting a CIFS server can make data inaccessible.
        • Remove NetBIOS name or rename the NetBIOS on the remote machine.
        "},{"location":"resources/ems-alert-runbook/#no-registered-scan-engine","title":"No Registered Scan Engine","text":"

        Impact: Availability

        EMS Event: Nblade.vscanNoRegdScanner

        The antivirus connector notified ONTAP that it does not have a registered scan engine. This might cause data unavailability if the \"scan-mandatory\" option is enabled.

        Remediation

        Perform the following corrective actions:

        1. Ensure that the scan engine software installed on the antivirus server is compatible with ONTAP.
        2. Ensure that scan engine software is running and configured to connect to the antivirus connector over local loopback.
        "},{"location":"resources/ems-alert-runbook/#no-vscan-connection","title":"No Vscan Connection","text":"

        Impact: Availability

        EMS Event: Nblade.vscanNoScannerConn

        ONTAP has no Vscan connection to service virus scan requests. This might cause data unavailability if the \"scan-mandatory\" option is enabled.

        Remediation

        Ensure that the scanner pool is properly configured and the antivirus servers are active and connected to ONTAP.

        "},{"location":"resources/ems-alert-runbook/#node-panic","title":"Node Panic","text":"

        Impact: Performance

        EMS Event: sk.panic

        This event is issued when a panic occurs.

        Remediation

        Contact NetApp customer support.

        "},{"location":"resources/ems-alert-runbook/#node-root-volume-space-low","title":"Node Root Volume Space Low","text":"

        Impact: Capacity

        EMS Event: mgmtgwd.rootvolrec.low.space

        The system has detected that the root volume is dangerously low on space. The node is not fully operational. Data LIFs might have failed over within the cluster, because of which NFS and CIFS access is limited on the node. Administrative capability is limited to local recovery procedures for the node to clear up space on the root volume.

        Remediation

        Perform the following corrective actions:

        1. Clear up space on the root volume by deleting old Snapshot copies, deleting files you no longer need from the /mroot directory, or expanding the root volume capacity.
        2. Reboot the controller.

        Contact NetApp technical support for more information or assistance.

        "},{"location":"resources/ems-alert-runbook/#non-responsive-antivirus-server","title":"Non-responsive AntiVirus Server","text":"

        Impact: Availability

        EMS Event: Nblade.vscanConnInactive

        This event occurs when ONTAP(R) detects a non-responsive antivirus (AV) server and forcibly closes its Vscan connection.

        Remediation

        Ensure that the AV server installed on the AV connector can connect to the Storage Virtual Machine (SVM) and receive the scan requests.

        "},{"location":"resources/ems-alert-runbook/#nonexistent-admin-share","title":"Nonexistent Admin Share","text":"

        Impact: Availability

        EMS Event: Nblade.cifsNoPrivShare

        Vscan issue: a client has attempted to connect to a nonexistent ONTAP_ADMIN$ share.

        Remediation

        Ensure that Vscan is enabled for the mentioned SVM ID. Enabling Vscan on a SVM causes the ONTAP_ADMIN$ share to be created for the SVM automatically.

        "},{"location":"resources/ems-alert-runbook/#ontap-mediator-added","title":"ONTAP Mediator Added","text":"

        Impact: Protection

        EMS Event: sm.mediator.added

        This message occurs when ONTAP Mediator is added successfully on a cluster.

        "},{"location":"resources/ems-alert-runbook/#ontap-mediator-ca-certificate-expired","title":"ONTAP Mediator CA Certificate Expired","text":"

        Impact: Protection

        EMS Event: sm.mediator.cacert.expired

        This message occurs when the ONTAP Mediator certificate authority (CA) certificate has expired. As a result, all further communication to the ONTAP Mediator will not be possible.

        Remediation

        Remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Update a new CA certificate on the ONTAP Mediator server. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

        "},{"location":"resources/ems-alert-runbook/#ontap-mediator-ca-certificate-expiring","title":"ONTAP Mediator CA Certificate Expiring","text":"

        Impact: Protection

        EMS Event: sm.mediator.cacert.expiring

        This message occurs when the ONTAP Mediator certificate authority (CA) certificate is due to expire within the next 30 days.

        Remediation

        Before this certificate expires, remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Update a new CA certificate on the ONTAP Mediator server. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

        "},{"location":"resources/ems-alert-runbook/#ontap-mediator-client-certificate-expired","title":"ONTAP Mediator Client Certificate Expired","text":"

        Impact: Protection

        EMS Event: sm.mediator.clientc.expired

        This message occurs when the ONTAP Mediator client certificate has expired. As a result, all further communication to the ONTAP Mediator will not be possible.

        Remediation

        Remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

        "},{"location":"resources/ems-alert-runbook/#ontap-mediator-client-certificate-expiring","title":"ONTAP Mediator Client Certificate Expiring","text":"

        Impact: Protection

        EMS Event: sm.mediator.clientc.expiring

        This message occurs when the ONTAP Mediator client certificate is due to expire within the next 30 days.

        Remediation

        Before this certificate expires, remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

        "},{"location":"resources/ems-alert-runbook/#ontap-mediator-not-accessible","title":"ONTAP Mediator Not Accessible","text":"

        Impact: Protection

        EMS Event: sm.mediator.misconfigured

        This message occurs when either the ONTAP Mediator is repurposed or the Mediator package is no longer installed on the Mediator server. As a result, SnapMirror failover is not possible.

        Remediation

        Remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

        "},{"location":"resources/ems-alert-runbook/#ontap-mediator-removed","title":"ONTAP Mediator Removed","text":"

        Impact: Protection

        EMS Event: sm.mediator.removed

        This message occurs when ONTAP Mediator is removed successfully from a cluster.

        "},{"location":"resources/ems-alert-runbook/#ontap-mediator-server-certificate-expired","title":"ONTAP Mediator Server Certificate Expired","text":"

        Impact: Protection

        EMS Event: sm.mediator.serverc.expired

        This message occurs when the ONTAP Mediator server certificate has expired. As a result, all further communication to the ONTAP Mediator will not be possible.

        Remediation

        Remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Update a new server certificate on the ONTAP Mediator server. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

        "},{"location":"resources/ems-alert-runbook/#ontap-mediator-server-certificate-expiring","title":"ONTAP Mediator Server Certificate Expiring","text":"

        Impact: Protection

        EMS Event: sm.mediator.serverc.expiring

        This message occurs when the ONTAP Mediator server certificate is due to expire within the next 30 days.

        Remediation

        Before this certificate expires, remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Install a new server certificate on the ONTAP Mediator server. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

        "},{"location":"resources/ems-alert-runbook/#ontap-mediator-unreachable","title":"ONTAP Mediator Unreachable","text":"

        Impact: Protection

        EMS Event: sm.mediator.unreachable

        This message occurs when the ONTAP Mediator is unreachable on a cluster. As a result, SnapMirror failover is not possible.

        Remediation

        Check the network connectivity to the ONTAP Mediator by using the \"network ping\" and \"network traceroute\" commands. If the issue persists, remove the configuration of the current ONTAP Mediator by using the \"snapmirror mediator remove\" command. Reconfigure access to the ONTAP Mediator by using the \"snapmirror mediator add\" command.

        "},{"location":"resources/ems-alert-runbook/#object-store-host-unresolvable","title":"Object Store Host Unresolvable","text":"

        Impact: Availability

        EMS Event: objstore.host.unresolvable

        The object store server host name cannot be resolved to an IP address. The object store client cannot communicate with the object-store server without resolving to an IP address. As a result, data might be inaccessible.

        Remediation

        Check the DNS configuration to verify that the host name is configured correctly with an IP address.

        "},{"location":"resources/ems-alert-runbook/#object-store-intercluster-lif-down","title":"Object Store Intercluster LIF Down","text":"

        Impact: Availability

        EMS Event: objstore.interclusterlifDown

        The object-store client cannot find an operational LIF to communicate with the object store server. The node will not allow object store client traffic until the intercluster LIF is operational. As a result, data might be inaccessible.

        Remediation

        Perform the following corrective actions:

        1. Check the intercluster LIF status by using the \"network interface show -role intercluster\" command.
        2. Verify that the intercluster LIF is configured correctly and operational.
        3. If an intercluster LIF is not configured, add it by using the \"network interface create -role intercluster\" command.
        "},{"location":"resources/ems-alert-runbook/#object-store-signature-mismatch","title":"Object Store Signature Mismatch","text":"

        Impact: Availability

        EMS Event: osc.signatureMismatch

        The request signature sent to the object store server does not match the signature calculated by the client. As a result, data might be inaccessible.

        Remediation

        Verify that the secret access key is configured correctly. If it is configured correctly, contact NetApp technical support for assistance.

        "},{"location":"resources/ems-alert-runbook/#qos-monitor-memory-maxed-out","title":"QoS Monitor Memory Maxed Out","text":"

        Impact: Capacity

        EMS Event: qos.monitor.memory.maxed

        This event occurs when a QoS subsystem's dynamic memory reaches its limit for the current platform hardware. As a result, some QoS features might operate in a limited capacity.

        Remediation

        Delete some active workloads or streams to free up memory. Use the \"statistics show -object workload -counter ops\" command to determine which workloads are active. Active workloads show non-zero ops. Then use the \"workload delete \" command multiple times to remove specific workloads. Alternatively, use the \"stream delete -workload *\" command to delete the associated streams from the active workload."},{"location":"resources/ems-alert-runbook/#readdir-timeout","title":"READDIR Timeout","text":"

        Impact: Availability

        EMS Event: wafl.readdir.expired

        A READDIR file operation has exceeded the timeout that it is allowed to run in WAFL. This can be because of very large or sparse directories. Corrective action is recommended.

        Remediation

        Perform the following corrective actions:

        1. Find information specific to recent directories that have had READDIR file operations expire by using the following 'diag' privilege nodeshell CLI command: wafl readdir notice show.
        2. Check if directories are indicated as sparse or not: a. If a directory is indicated as sparse, it is recommended that you copy the contents of the directory to a new directory to remove the sparseness of the directory file. b. If a directory is not indicated as sparse and the directory is large, it is recommended that you reduce the size of the directory file by reducing the number of file entries in the directory.
        "},{"location":"resources/ems-alert-runbook/#ransomware-activity-detected","title":"Ransomware Activity Detected","text":"

        Impact: Security

        EMS Event: callhome.arw.activity.seen

        To protect the data from the detected ransomware, a Snapshot copy has been taken that can be used to restore original data.

        Your system generates and transmits an AutoSupport or \"call home\" message to NetApp technical support and any configured destinations. The AutoSupport message improves problem determination and resolution.

        Remediation

        Refer to the anti-ransomware documentation to take remedial measures for ransomware activity. If you need assistance, contact NetApp technical support.

        "},{"location":"resources/ems-alert-runbook/#relocation-of-storage-pool-failed","title":"Relocation of Storage Pool Failed","text":"

        Impact: Availability

        EMS Event: arl.netra.ca.check.failed

        This event occurs during the relocation of a storage pool (aggregate), when the destination node cannot reach the object stores.

        Remediation

        Perform the following corrective actions:

        1. Verify that your intercluster LIF is online and functional by using the \"network interface show\" command.
        2. Check network connectivity to the object store server by using the\"'ping\" command over the destination node intercluster LIF.
        3. Verify that the configuration of your object store has not changed and that login and connectivity information is still accurate by using the \"aggregate object-store config show\" command.

        Alternatively, you can override the error by using the \"override-destination-checks\" parameter of the relocation command.

        Contact NetApp technical support for more information or assistance.

        "},{"location":"resources/ems-alert-runbook/#san-active-active-state-changed","title":"SAN \"active-active\" State Changed","text":"

        Impact: Availability

        EMS Event: scsiblade.san.config.active

        The SAN pathing is no longer symmetric. Pathing should be symmetric only on ASA platforms, because AFF and FAS platforms are both asymmetric.

        Remediation

        Try and enable the \"active-active\" state. Contact customer support if the problem persists.

        "},{"location":"resources/ems-alert-runbook/#sfp-in-fc-target-adapter-receiving-low-power","title":"SFP in FC target adapter receiving low power","text":"

        Impact: Availability

        EMS Event: scsitarget.fct.sfpRxPowerLow

        This alert occurs when the power received (RX) by a small form-factor pluggable transceiver (SFP in FC target) is at a level below the defined threshold, which might indicate a failing or faulty part.

        Remediation

        Monitor the operating value. If it continues to decrease, then replace the SFP and/or the cables.

        "},{"location":"resources/ems-alert-runbook/#sfp-in-fc-target-adapter-transmitting-low-power","title":"SFP in FC target adapter transmitting low power","text":"

        Impact: Availability

        EMS Event: scsitarget.fct.sfpTxPowerLow

        This alert occurs when the power transmitted (TX) by a small form-factor pluggable transceiver (SFP in FC target) is at a level below the defined threshold, which might indicate a failing or faulty part.

        Remediation

        Monitor the operating value. If it continues to decrease, then replace the SFP and/or the cables.

        "},{"location":"resources/ems-alert-runbook/#service-processor-heartbeat-missed","title":"Service Processor Heartbeat Missed","text":"

        Impact: Availability

        EMS Event: callhome.sp.hbt.missed

        This message occurs when ONTAP does not receive an expected \"heartbeat\" signal from the Service Processor (SP). Along with this message, log files from SP will be sent out for debugging. ONTAP will reset the SP to attempt to restore communication. The SP will be unavailable for up to two minutes while it reboots.

        Remediation

        Contact NetApp technical support.

        "},{"location":"resources/ems-alert-runbook/#service-processor-heartbeat-stopped","title":"Service Processor Heartbeat Stopped","text":"

        Impact: Availability

        EMS Event: callhome.sp.hbt.stopped

        This message occurs when ONTAP is no longer receiving heartbeats from the Service Processor (SP). Depending on the hardware design, the system may continue to serve data or may shut down to prevent data loss or hardware damage. If the system continues to serve data, it cannot send notifications of down appliances, boot errors, or Open Firmware (OFW) Power-On Self-Test (POST) errors, because the SP might not be working. If your system is configured to do so, it generates and transmits an AutoSupport (or 'call home') message to NetApp technical support and to the configured destinations. Successful delivery of an AutoSupport message significantly improves problem determination and resolution.

        Remediation

        If the system has shut down, attempt a hard power cycle: pull the controller out from the chassis, push it back in, and then power on the system. Contact NetApp technical support if the problem persists after the power cycle, or for any other condition that may warrant attention.

        "},{"location":"resources/ems-alert-runbook/#service-processor-not-configured","title":"Service Processor Not Configured","text":"

        Impact: Availability

        EMS Event: sp.notConfigured

        This event occurs on a weekly basis, to remind you to configure the Service Processor (SP). The SP is a physical device that is incorporated into your system to provide remote access and remote management capabilities. You should configure the SP to use its full functionality.

        Remediation

        Perform the following corrective actions:

        1. Configure the SP by using the \"system service-processor network modify\" command.
        2. Optionally, obtain the MAC address of the SP by using the \"system service-processor network show\" command.
        3. Verify the SP network configuration by using the \"system service-processor network show\" command.
        4. Verify that the SP can send an AutoSupport email by using the \"system service-processor autosupport invoke\" command. NOTE: AutoSupport email hosts and recipients should be configured in ONTAP before you issue this command.
        "},{"location":"resources/ems-alert-runbook/#service-processor-offline","title":"Service Processor Offline","text":"

        Impact: Availability

        EMS Event: sp.ipmi.lost.shutdown

        ONTAP is no longer receiving heartbeats from the Service Processor (SP), even though all the SP recovery actions have been taken. ONTAP cannot monitor the health of the hardware without the SP.

        The system will shut down to prevent hardware damage and data loss. Set up a panic alert to be notified immediately if the SP goes offline.

        Remediation

        Power-cycle the system by performing the following actions:

        1. Pull the controller out from the chassis.
        2. Push the controller back in.
        3. Turn the controller back on. If the problem persists, replace the controller module.
        "},{"location":"resources/ems-alert-runbook/#shadow-copy-failed","title":"Shadow Copy Failed","text":"

        Impact: Availability

        EMS Event: cifs.shadowcopy.failure

        An operation of the Volume Shadow Copy Service (VSS), a Microsoft Server backup and restore service, has failed.

        Remediation

        Check the following using the information provided in the event message:

        • Is shadow copy configuration enabled?
        • Are the appropriate licenses installed?
        • On which shares is the shadow copy operation performed?
        • Is the share name correct?
        • Does the share path exist?
        • What are the states of the shadow copy set and its shadow copies?
        "},{"location":"resources/ems-alert-runbook/#shelf-fan-failed","title":"Shelf Fan Failed","text":"

        Impact: Availability

        EMS Event: ses.status.fanError

        The indicated cooling fan or fan module of the shelf has failed. The disks in the shelf might not receive enough cooling airflow, which might result in disk failure.

        Remediation

        Perform the following corrective actions:

        1. Verify that the fan module is fully seated and secured. NOTE: The fan is integrated into the power supply module in some disk shelves.
        2. If the issue persists, replace the fan module.
        3. If the issue still persists, contact NetApp technical support for assistance.
        "},{"location":"resources/ems-alert-runbook/#snapmirror-relationship-common-snapshot-failed","title":"SnapMirror Relationship Common Snapshot Failed","text":"

        Impact: Protection

        EMS Event: sms.common.snapshot.failed

        This message occurs when there is a failure in creating a common Snapshot(tm) copy. The SnapMirror\u00ae Sync relationship continues to be in \"in-sync\" status. The latest common Snapshot copy is used for recovery in case the relationship status changes to \"out-of-sync.\" The common Snapshot copy should be created at scheduled intervals to decrease the recovery time of \"out-of-sync\" relationships.

        Remediation

        Create a common snapshot manually by using the \"snapmirror update\" command at the destination.

        "},{"location":"resources/ems-alert-runbook/#snapmirror-relationship-initialization-failed","title":"SnapMirror Relationship Initialization Failed","text":"

        Impact: Protection

        EMS Event: smc.snapmir.init.fail

        This message occurs when a SnapMirror\u00ae 'initialize' command fails and no more retries will be attempted.

        Remediation

        Check the reason for the error, take action accordingly, and issue the command again.

        "},{"location":"resources/ems-alert-runbook/#snapmirror-relationship-out-of-sync","title":"SnapMirror Relationship Out of Sync","text":"

        Impact: Protection

        EMS Event: sms.status.out.of.sync

        This event occurs when a SnapMirror(R) Sync relationship status changes from \"in-sync\" to \"out-of-sync\". I/O restrictions are imposed on the source volume based on the mode of replication. Client read or write access to the volume is not allowed for relationships of the \"strict-sync-mirror\" policy type. Data protection is affected.

        Remediation

        Check the network connection between the source and destination volumes. Monitor the SnapMirror Sync relationship status using the \"snapmirror show\" command. \"Auto-resync\" attempts to bring the relationship back to the \"in-sync\" status.

        "},{"location":"resources/ems-alert-runbook/#snapmirror-relationship-resync-attempt-failed","title":"SnapMirror Relationship Resync Attempt Failed","text":"

        Impact: Protection

        EMS Event: sms.resync.attempt.failed

        This message occurs when a resynchronize operation between the source volume and destination volume fails. The SnapMirror\u00ae Sync relationship is in \"out-of-sync\" status. Data protection is impacted.

        Remediation

        Monitor SnapMirror Sync status using the \"snapmirror show\" command. If the auto-resync attempts fail, bring the relationship back to \"in-sync\" status manually by using the \"snapmirror resync\" command.

        "},{"location":"resources/ems-alert-runbook/#snapmirror-relationship-snapshot-is-not-replicated","title":"SnapMirror Relationship Snapshot is not Replicated","text":"

        Impact: Protection

        EMS Event: sms.snap.not.replicated

        This message occurs when a Snapshot(tm) copy for SnapMirror\u00ae Synchronous relationship is not successfully replicated.

        Remediation

        No remediation is required. You can trigger another Snapshot create request to create a Snapshot copy that exists on both the primary and secondary sites.

        "},{"location":"resources/ems-alert-runbook/#snapmirror-active-sync-automatic-unplanned-failover-completed","title":"SnapMirror active sync Automatic Unplanned Failover Completed","text":"

        Impact: Protection

        EMS Event: smbc.aufo.completed

        This message occurs when the SnapMirror\u00ae active sync automatic unplanned failover operation completes.

        "},{"location":"resources/ems-alert-runbook/#snapmirror-active-sync-automatic-unplanned-failover-failed","title":"SnapMirror active sync Automatic Unplanned Failover Failed","text":"

        Impact: Protection

        EMS Event: smbc.aufo.failed

        This message occurs when the SnapMirror\u00ae active sync automatic unplanned failover operation fails.

        Remediation

        The automatic unplanned failover will be retried internally; however, operations are suspended until the failover completes. If AUFO fails persistently and you want to continue servicing I/O, run \"snapmirror delete -destination-path destination_path\" followed by \"snapmirror break\" on the volumes. Doing so affects protection because the relationship is removed; you will need to re-establish the protection relationship.

        "},{"location":"resources/ems-alert-runbook/#snapmirror-active-sync-planned-failover-completed","title":"SnapMirror active sync Planned Failover Completed","text":"

        Impact: Protection

        EMS Event: smbc.pfo.completed

        This message occurs when the SnapMirror\u00ae active sync planned failover operation completes.

        "},{"location":"resources/ems-alert-runbook/#snapmirror-active-sync-planned-failover-failed","title":"SnapMirror active sync Planned Failover Failed","text":"

        Impact: Protection

        EMS Event: smbc.pfo.failed

        This message occurs when the SnapMirror\u00ae active sync planned failover operation fails.

        Remediation

        Determine the cause of the failure by using the \"snapmirror failover show -fields error-reason\" command. If the relationship is out-of-sync, wait until the relationship is brought back to in-sync. Otherwise, address the error causing the planned failover failure and then retry the \"snapmirror failover start -destination-path destination_path\" command.

        "},{"location":"resources/ems-alert-runbook/#snapmirror-active-sync-relationship-out-of-sync","title":"SnapMirror active sync Relationship Out of Sync","text":"

        Impact: Protection

        EMS Event: sms.status.out.of.sync.cg

        This message occurs when a SnapMirror for Business Continuity (SMBC) relationship changes status from \"in-sync\" to \"out-of-sync\". As a result, RPO=0 data protection will be disrupted.

        Remediation

        Check the network connection between the source and destination volumes. Monitor the SMBC relationship status by using the \"snapmirror show\" command on the destination, and by using the \"snapmirror list-destinations\" command on the source. Auto-resync will attempt to bring the relationship back to \"in-sync\" status. If the resync fails, verify that all the nodes in the cluster are in quorum and are healthy.

        "},{"location":"resources/ems-alert-runbook/#storage-switch-power-supplies-failed","title":"Storage Switch Power Supplies Failed","text":"

        Impact: Availability

        EMS Event: cluster.switch.pwr.fail

        A power supply is missing in the cluster switch. Redundancy is reduced, and there is a risk of an outage with any further power failures.

        Remediation

        Perform the following corrective actions:

        1. Ensure that the power supply mains, which supplies power to the cluster switch, is turned on.
        2. Ensure that the power cord is connected to the power supply.

        Contact NetApp technical support if the issue persists.

        "},{"location":"resources/ems-alert-runbook/#storage-vm-anti-ransomware-monitoring","title":"Storage VM Anti-ransomware Monitoring","text":"

        Impact: Security

        EMS Event: arw.vserver.state

        The anti-ransomware monitoring for the storage VM is disabled.

        Remediation

        Enable anti-ransomware to protect the storage VM.

        "},{"location":"resources/ems-alert-runbook/#storage-vm-stop-succeeded","title":"Storage VM Stop Succeeded","text":"

        Impact: Availability

        EMS Event: vserver.stop.succeeded

        This message occurs when a 'vserver stop' operation succeeds.

        Remediation

        Use the 'vserver start' command to start data access on the storage VM.

        "},{"location":"resources/ems-alert-runbook/#system-cannot-operate-due-to-main-unit-fan-failure","title":"System Cannot Operate Due to Main Unit Fan Failure","text":"

        Impact: Availability

        EMS Event: monitor.fan.critical

        One or more main unit fans have failed, disrupting system operation. This might lead to potential data loss.

        Remediation

        Replace the failed fans.

        "},{"location":"resources/ems-alert-runbook/#too-many-cifs-authentication","title":"Too Many CIFS Authentication","text":"

        Impact: Availability

        EMS Event: Nblade.cifsManyAuths

        Many authentication negotiations have occurred simultaneously. There are 256 incomplete new session requests from this client.

        Remediation

        Investigate why the client has created 256 or more new connection requests. You might have to contact the vendor of the client or of the application to determine why the error occurred.

        "},{"location":"resources/ems-alert-runbook/#unassigned-disks","title":"Unassigned Disks","text":"

        Impact: Availability

        EMS Event: unowned.disk.reminder

        The system has unassigned disks. Capacity is being wasted, and your system might have a misconfiguration or a partial configuration change applied.

        Remediation

        Perform the following corrective actions:

        1. Determine which disks are unassigned by using the \"disk show -n\" command.
        2. Assign the disks to a system by using the \"disk assign\" command.
        "},{"location":"resources/ems-alert-runbook/#unauthorized-user-access-to-admin-share","title":"Unauthorized User Access to Admin Share","text":"

        Impact: Security

        EMS Event: Nblade.vscanBadUserPrivAccess

        A client has attempted to connect to the privileged ONTAP_ADMIN$ share even though their logged-in user is not an allowed user.

        Remediation

        Perform the following corrective actions:

        1. Ensure that the mentioned username and IP address are configured in one of the active Vscan scanner pools.
        2. Check the scanner pool configuration that is currently active by using the \"vserver vscan scanner pool show-active\" command.
        "},{"location":"resources/ems-alert-runbook/#virus-detected","title":"Virus Detected","text":"

        Impact: Availability

        EMS Event: Nblade.vscanVirusDetected

        A Vscan server has reported an error to the storage system. This typically indicates that a virus has been found. However, other errors on the Vscan server can cause this event.

        Client access to the file is denied. The Vscan server might, depending on its settings and configuration, clean the file, quarantine it, or delete it.

        Remediation

        Check the log of the Vscan server reported in the \"syslog\" event to see if it was able to successfully clean, quarantine, or delete the infected file. If it was not able to do so, a system administrator might have to manually delete the file.

        "},{"location":"resources/ems-alert-runbook/#volume-anti-ransomware-monitoring","title":"Volume Anti-ransomware Monitoring","text":"

        Impact: Security

        EMS Event: arw.volume.state

        The anti-ransomware monitoring for the volume is disabled.

        Remediation

        Enable anti-ransomware to protect the volume.

        "},{"location":"resources/ems-alert-runbook/#volume-automatic-resizing-succeeded","title":"Volume Automatic Resizing Succeeded","text":"

        Impact: Capacity

        EMS Event: wafl.vol.autoSize.done

        This event occurs when the automatic resizing of a volume is successful. It happens when the \"autosize grow\" option is enabled, and the volume reaches the grow threshold percentage.

        "},{"location":"resources/ems-alert-runbook/#volume-offline","title":"Volume Offline","text":"

        Impact: Availability

        EMS Event: wafl.vvol.offline

        This message indicates that a volume has been taken offline.

        Remediation

        Bring the volume back online.

        "},{"location":"resources/ems-alert-runbook/#volume-restricted","title":"Volume Restricted","text":"

        Impact: Availability

        EMS Event: wafl.vvol.restrict

        This event indicates that a flexible volume has been restricted.

        Remediation

        Bring the volume back online.

        "},{"location":"resources/matrix/","title":"Matrix","text":""},{"location":"resources/matrix/#matrix","title":"Matrix","text":"

        The \u2133atri\u03c7 package provides the matrix.Matrix data-structure for storage, manipulation and transmission of both numeric and non-numeric (string) data. It is utilized by core components of Harvest, including collectors, plugins and exporters. It furthermore serves as an interface between these components, such that \"the left hand does not know what the right hand does\".

        Internally, the Matrix is a collection of metrics (matrix.Metric) and instances (matrix.Instance) in the form of a 2-dimensional array:

        Since we use hash tables for accessing the elements of the array, all metrics and instances added to the matrix must have a unique key. Metrics are typed and contain the numeric data (i.e. rows) of the Matrix. Instances only serve as pointers to the columns of the Matrix, but they also store non-numeric data as labels (*dict.Dict).

        This package is the architectural backbone of Harvest, therefore understanding it is key for an advanced user or contributor.

        "},{"location":"resources/matrix/#basic-usage","title":"Basic Usage","text":""},{"location":"resources/matrix/#initialize","title":"Initialize","text":"

        func matrix.New(name, object string, identifier string) *Matrix\n// always succeeds and returns a pointer to an (empty) Matrix\n
        This section describes how to properly initialize a new Matrix instance. Note that if you write a collector, a Matrix instance is already properly initialized for you (as MyCollector.matrix), and if you write a plugin or exporter, it is passed to you from the collector. That means most of the time you don't have to worry about initializing the Matrix.

        matrix.New() requires three arguments: * UUID is by convention the collector name (e.g. MyCollector) if the Matrix comes from a collector, or the collector name and the plugin name concatenated with a . (e.g. MyCollector.MyPlugin) if the Matrix comes from a plugin. * object is a description of the instances of the Matrix. For example, if we collect data about cars and our instances are cars, a good name would be car. * identifier is a unique key used to identify a matrix instance

        Note that identifier should uniquely identify a Matrix instance. This is not a strict requirement, but guarantees that your data is properly handled by exporters.

        "},{"location":"resources/matrix/#example","title":"Example","text":"

        Here is an example from the point of view of a collector:

        import \"github.com/netapp/harvest/v2/pkg/matrix\"\n\nvar myMatrix *matrix.Matrix\n\nmyMatrix = matrix.New(\"CarCollector\", \"car\", \"car\")\n

        Next step is to add metrics and instances to our Matrix.

        "},{"location":"resources/matrix/#add-instances-and-instance-labels","title":"Add instances and instance labels","text":"
        func (x *Matrix) NewInstance(key string) (*Instance, error)\n// returns pointer to a new Instance, or nil with error (if key is not unique)\n

        func (i *Instance) SetLabel(key, value string)\n// always successful, overwrites existing values\n
        func (i *Instance) GetLabel(key) string\n// always returns value, if label is not set, returns empty string\n

        Once we have initialized a Matrix, we can add instances and add labels to our instances.

        "},{"location":"resources/matrix/#example_1","title":"Example","text":"
        var (\n    instance *matrix.Instance\n    err error\n)\nif instance, err = myMatrix.NewInstance(\"SomeCarMark\"); err != nil {\n    return err\n    // or handle err, but beware that instance is nil\n}\ninstance.SetLabel(\"mark\", \"SomeCarMark\")\ninstance.SetLabel(\"color\", \"red\")\ninstance.SetLabel(\"style\", \"coupe\")\n// add as many labels as you like\ninstance.GetLabel(\"color\") // return \"red\"\ninstance.GetLabel(\"owner\") // returns \"\"\n
        "},{"location":"resources/matrix/#add-metrics","title":"Add Metrics","text":"
        func (x *Matrix) NewMetricInt64(key string) (Metric, error)\n// returns pointer to a new MetricInt64, or nil with error (if key is not unique)\n// note that Metric is an interface\n

        Metrics are typed and there are currently 8 types, all of which can be created with the same signature as above: * MetricUint8 * MetricUint32 * MetricUint64 * MetricInt * MetricInt32 * MetricInt64 * MetricFloat32 * MetricFloat64. We are able to read from and write to a metric instance using different types (as shown in the next section); however, choosing a type wisely ensures that this is done efficiently and that overflow does not occur.

        We can add labels to metrics just like instances. This is usually done when we deal with histograms:

        func (m Metric) SetLabel(key, value string)\n// always successful, overwrites existing values\n
        func (m Metric) GetLabel(key) string\n// always returns value, if label is not set, returns empty string\n

        "},{"location":"resources/matrix/#example_2","title":"Example","text":"

        Continuing our Matrix for collecting car-related data:

        var (\n    speed, length matrix.Metric\n    err error\n)\n\nif speed, err = myMatrix.NewMetricUint32(\"max_speed\"); err != nil {\n    return err\n}\nif length, err = myMatrix.NewMetricFloat32(\"length_in_mm\"); err != nil {\n    return err\n}\n
        "},{"location":"resources/matrix/#write-numeric-data","title":"Write numeric data","text":"

        func (x *Matrix) Reset()\n// flush numeric data from previous poll\n
        func (m Metric) SetValueInt64(i *Instance, v int64) error\nfunc (m Metric) SetValueUint8(i *Instance, v uint8) error\nfunc (m Metric) SetValueUint64(i *Instance, v uint64) error\nfunc (m Metric) SetValueFloat64(i *Instance, v float64) error\nfunc (m Metric) SetValueBytes(i *Instance, v []byte) error\nfunc (m Metric) SetValueString(i *Instance, v string) error\n// sets the numeric value for the instance i to v\n// returns error if v is invalid (explained below)\n
        func (m Metric) AddValueInt64(i *Instance, v int64) error\n// increments the numeric value for the instance i by v\n// same signatures for all the types defined above\n

        When possible you should reuse a Matrix for each data poll, but to do that, you need to call Reset() to drop old data from the Matrix. It is safe to add new instances and metrics after calling this method.

        The SetValue*() and AddValue*() methods are typed same as the metrics. Even though you are not required to use the same type as the metric, it is the safest and most efficient way.

        Since most collectors get their data as bytes or strings, it is recommended to use the SetValueString() and SetValueBytes() methods.

        These methods return an error if value v can not be converted to the type of the metric. Error is always nil when the type of v matches the type of the metric.

        "},{"location":"resources/matrix/#example_3","title":"Example","text":"

        Continuing with the previous examples:

        myMatrix.Reset()\n// write numbers to the matrix using the instance and the metrics we have created\n\n// let the metric do the conversion for us\nif err = speed.SetValueString(instance, \"500\"); err != nil {\n    logger.Error(me.Prefix, \"set speed value: \", err)\n}\n// here we ignore err since the value matches the metric type\nlength.SetValueFloat64(instance, 10000.00)\n\n// safe to add new instances after Reset()\nvar instance2 *matrix.Instance\nif instance2, err = myMatrix.NewInstance(\"SomeOtherCar\"); err != nil {\n    return err\n}\n\n// possible and safe even though length has type Float32\nif err = length.SetValueInt64(instance2, 13000); err != nil {\n    logger.Error(me.Prefix, \"set length value:\", err)\n}\n\n// possible, but will overflow since speed is unsigned\nif err = speed.SetValueInt64(instance2, -500); err != nil {\n    logger.Error(me.Prefix, \"set speed value:\", err)\n}\n
        "},{"location":"resources/matrix/#read-metrics-and-instances","title":"Read metrics and instances","text":"

        In this section we switch gears and look at the Matrix from the point of view of plugins and exporters. Both those components need to read from the Matrix and have no knowledge of its origin or contents.

        func (x *Matrix) GetMetrics() map[string]Metric\n// returns all metrics in the Matrix\n
        func (x *Matrix) GetInstances() map[string]*Instance\n// returns all instances in the Matrix\n

        Usually we will do a nested loop with these two methods to read all data in the Matrix. See examples below.

        "},{"location":"resources/matrix/#example-iterate-over-instances","title":"Example: Iterate over instances","text":"

        In this example the method PrintKeys() will iterate over a Matrix and print all metric and instance keys.

        func PrintKeys(x *matrix.Matrix) {\n    for metricKey := range x.GetMetrics() {\n        fmt.Println(\"metric key=\", metricKey)\n    }\n    for instanceKey := range x.GetInstances() {\n        fmt.Println(\"instance key=\", instanceKey)\n    }\n}\n
        "},{"location":"resources/matrix/#example-read-instance-labels","title":"Example: Read instance labels","text":"

        Each instance has a set of labels. We can iterate over these labels with the GetLabel() and GetLabels() methods. In this example, we write a function that prints all labels of an instance:

        func PrintLabels(instance *matrix.Instance) {\n    for label, value := range instance.GetLabels().Map() {\n        fmt.Printf(\"%s=%s\\n\", label, value)\n    }\n}\n
        "},{"location":"resources/matrix/#example-read-metric-values-labels","title":"Example: Read metric values labels","text":"

        Similar to the SetValue* and AddValue* methods, you can choose a type when reading from a metric. If you don't know the type of the metric, it is safe to read it as a string. In this example, we write a function that prints the value of a metric for all instances in a Matrix:

        func PrintMetricValues(x *matrix.Matrix, m matrix.Metric) {\n    for key, instance := range x.GetInstances() {\n        if value, has := m.GetValueString(instance); has {\n            fmt.Printf(\"instance %s = %s\\n\", key, value)\n        } else {\n            fmt.Printf(\"instance %s has no value\\n\", key)\n        }\n    }\n}\n
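
        Putting the two accessors together, the nested loop mentioned earlier can be sketched as follows. This is an illustrative sketch rather than code from Harvest itself; it relies only on the GetMetrics(), GetInstances(), GetValueString(), and GetLabel() methods shown in this section, and the \"color\" label is a hypothetical example.

        func PrintMatrix(x *matrix.Matrix) {\n    // outer loop over metrics, inner loop over instances\n    for metricKey, metric := range x.GetMetrics() {\n        for instanceKey, instance := range x.GetInstances() {\n            // read the value as a string; has is false when no value was set for this instance\n            if value, has := metric.GetValueString(instance); has {\n                fmt.Printf(\"%s{instance=%s, color=%s} = %s\\n\", metricKey, instanceKey, instance.GetLabel(\"color\"), value)\n            }\n        }\n    }\n}\n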
        "},{"location":"resources/power-algorithm/","title":"Power Algorithm","text":"

        Gathering power metrics requires a cluster with:

        • ONTAP versions 9.6+
        • REST enabled, even when using the ZAPI collector. After granting REST permissions, restart Harvest.

        REST is required because it is the only way to collect chassis field-replaceable-unit (FRU) information via the REST API /api/private/cli/system/chassis/fru.

        "},{"location":"resources/power-algorithm/#how-does-harvest-calculate-cluster-power","title":"How does Harvest calculate cluster power?","text":"

        Cluster power is the sum of a cluster's node(s) power + the sum of attached disk shelve(s) power.

        Redundant power supplies (PSU) load-share the total load. With n PSUs, each PSU does roughly (1/n) the work (the actual amount is slightly more than a single PSU due to additional fans.)

        "},{"location":"resources/power-algorithm/#node-power","title":"Node power","text":"

        Node power is calculated by collecting power supply unit (PSU) power, as reported by REST /api/private/cli/system/environment/sensors or by ZAPI environment-sensors-get-iter.

        When a power supply is shared between controllers, the PSU's power will be evenly divided across the controllers due to load-sharing.

        For example:

        • FAS2750 models have two power supplies that power both controllers. Each PSU is shared between the two controllers.
        • A800 models have four power supplies. PSU1 and PSU2 power Controller1 and PSU3 and PSU4 power Controller2. Each PSU provides power to a single controller.

        Harvest determines whether a PSU is shared between controllers by consulting the connected_nodes of each PSU, as reported by ONTAP via /api/private/cli/system/chassis/fru
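        The following Go sketch illustrates the division described above. It is not Harvest's actual implementation; the psuPower type and its fields are hypothetical stand-ins for values collected from the sensors and FRU endpoints.

        // psuPower is a hypothetical holder for one PSU's measured power and the nodes it feeds\ntype psuPower struct {\n    watts          float64  // e.g. (mV/1000) * (mA/1000) summed per PSU, or a direct \"Power In\" sensor\n    connectedNodes []string // from connected_nodes reported by /api/private/cli/system/chassis/fru\n}\n\n// nodePower sums each PSU that feeds the node, dividing shared PSUs evenly across their connected nodes\nfunc nodePower(node string, psus []psuPower) float64 {\n    total := 0.0\n    for _, p := range psus {\n        for _, n := range p.connectedNodes {\n            if n == node {\n                total += p.watts / float64(len(p.connectedNodes))\n            }\n        }\n    }\n    return total\n}\n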

        "},{"location":"resources/power-algorithm/#disk-shelf-power","title":"Disk shelf power","text":"

        Disk shelf power is calculated by collecting psu.power_drawn, as reported by REST, via /api/storage/shelves or sensor-reading, as reported by ZAPI storage-shelf-info-get-iter.

        The power for embedded shelves is ignored, since that power is already accounted for in the controller's power draw.
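        As a companion sketch (again illustrative, with a hypothetical shelf type), the shelf contribution described above can be expressed as:

        // shelf is a hypothetical holder for the fields used here: per-PSU power_drawn and the embedded flag\ntype shelf struct {\n    psuPowerDrawn []float64 // psu.power_drawn in watts, from /api/storage/shelves\n    embedded      bool      // embedded shelves are skipped; their draw is already in the controller's PSUs\n}\n\n// shelfPower sums power_drawn across all non-embedded shelves\nfunc shelfPower(shelves []shelf) float64 {\n    total := 0.0\n    for _, s := range shelves {\n        if s.embedded {\n            continue\n        }\n        for _, w := range s.psuPowerDrawn {\n            total += w\n        }\n    }\n    return total\n}\n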

        "},{"location":"resources/power-algorithm/#examples","title":"Examples","text":""},{"location":"resources/power-algorithm/#fas2750","title":"FAS2750","text":"
        # Power Metrics for 10.61.183.200\n\n## ONTAP version NetApp Release 9.8P16: Fri Dec 02 02:05:05 UTC 2022\n\n## Nodes\nsystem show\n       Node         |  Model  | SerialNumber  \n----------------------+---------+---------------\ncie-na2750-g1344-01 | FAS2750 | 621841000123  \ncie-na2750-g1344-02 | FAS2750 | 621841000124\n\n## Chassis\nsystem chassis fru show\n ChassisId   |      Name       |         Fru         |    Type    | Status | NumNodes |              ConnectedNodes               \n---------------+-----------------+---------------------+------------+--------+----------+-------------------------------------------\n021827030435 | 621841000123    | cie-na2750-g1344-01 | controller | ok     |        1 | cie-na2750-g1344-01                       \n021827030435 | 621841000124    | cie-na2750-g1344-02 | controller | ok     |        1 | cie-na2750-g1344-02                       \n021827030435 | PSQ094182201794 | PSU2 FRU            | psu        | ok     |        2 | cie-na2750-g1344-02, cie-na2750-g1344-01  \n021827030435 | PSQ094182201797 | PSU1 FRU            | psu        | ok     |        2 | cie-na2750-g1344-02, cie-na2750-g1344-01\n\n## Sensors\nsystem environment sensors show\n(filtered by power, voltage, current)\n       Node         |     Name      |  Type   | State  | Value | Units  \n----------------------+---------------+---------+--------+-------+--------\ncie-na2750-g1344-01 | PSU1 12V Curr | current | normal |  9920 | mA     \ncie-na2750-g1344-01 | PSU1 12V      | voltage | normal | 12180 | mV     \ncie-na2750-g1344-01 | PSU1 5V Curr  | current | normal |  4490 | mA     \ncie-na2750-g1344-01 | PSU1 5V       | voltage | normal |  5110 | mV     \ncie-na2750-g1344-01 | PSU2 12V Curr | current | normal |  9140 | mA     \ncie-na2750-g1344-01 | PSU2 12V      | voltage | normal | 12100 | mV     \ncie-na2750-g1344-01 | PSU2 5V Curr  | current | normal |  4880 | mA     \ncie-na2750-g1344-01 | PSU2 5V       | voltage | normal |  5070 | mV     \ncie-na2750-g1344-02 | PSU1 12V Curr | current | normal |  9920 | mA     \ncie-na2750-g1344-02 | PSU1 12V      | voltage | normal | 12180 | mV     \ncie-na2750-g1344-02 | PSU1 5V Curr  | current | normal |  4330 | mA     \ncie-na2750-g1344-02 | PSU1 5V       | voltage | normal |  5110 | mV     \ncie-na2750-g1344-02 | PSU2 12V Curr | current | normal |  9170 | mA     \ncie-na2750-g1344-02 | PSU2 12V      | voltage | normal | 12100 | mV     \ncie-na2750-g1344-02 | PSU2 5V Curr  | current | normal |  4720 | mA     \ncie-na2750-g1344-02 | PSU2 5V       | voltage | normal |  5070 | mV\n\n## Shelf PSUs\nstorage shelf show\nShelf | ProductId | ModuleType | PSUId | PSUIsEnabled | PSUPowerDrawn | Embedded  \n------+-----------+------------+-------+--------------+---------------+---------\n  1.0 | DS224-12  | iom12e     | 1,2   | true,true    | 1397,1318     | true\n\n### Controller Power From Sum(InVoltage * InCurrent)/NumNodes\nPower: 256W\n
        "},{"location":"resources/power-algorithm/#aff-a800","title":"AFF A800","text":"
        # Power Metrics for 10.61.124.110\n\n## ONTAP version NetApp Release 9.13.1P1: Tue Jul 25 10:19:28 UTC 2023\n\n## Nodes\nsystem show\n  Node    |  Model   | SerialNumber  \n----------+----------+-------------\na800-1-01 | AFF-A800 | 941825000071  \na800-1-02 | AFF-A800 | 941825000072\n\n## Chassis\nsystem chassis fru show\n   ChassisId    |      Name      |    Fru    |    Type    | Status | NumNodes | ConnectedNodes  \n----------------+----------------+-----------+------------+--------+----------+---------------\nSHFFG1826000154 | 941825000071   | a800-1-01 | controller | ok     |        1 | a800-1-01       \nSHFFG1826000154 | 941825000072   | a800-1-02 | controller | ok     |        1 | a800-1-02       \nSHFFG1826000154 | EEQT1822002800 | PSU1 FRU  | psu        | ok     |        1 | a800-1-02       \nSHFFG1826000154 | EEQT1822002804 | PSU2 FRU  | psu        | ok     |        1 | a800-1-02       \nSHFFG1826000154 | EEQT1822002805 | PSU2 FRU  | psu        | ok     |        1 | a800-1-01       \nSHFFG1826000154 | EEQT1822002806 | PSU1 FRU  | psu        | ok     |        1 | a800-1-01\n\n## Sensors\nsystem environment sensors show\n(filtered by power, voltage, current)\n  Node    |     Name      |  Type   | State  | Value | Units  \n----------+---------------+---------+--------+-------+------\na800-1-01 | PSU1 Power In | unknown | normal |   376 | W      \na800-1-01 | PSU2 Power In | unknown | normal |   411 | W      \na800-1-02 | PSU1 Power In | unknown | normal |   383 | W      \na800-1-02 | PSU2 Power In | unknown | normal |   433 | W\n\n## Shelf PSUs\nstorage shelf show\nShelf |  ProductId  | ModuleType | PSUId | PSUIsEnabled | PSUPowerDrawn | Embedded  \n------+-------------+------------+-------+--------------+---------------+---------\n  1.0 | FS4483PSM3E | psm3e      |       |              |               | true      \n\n### Controller Power From Sum(InPower sensors)\nPower: 1603W\n
        "},{"location":"resources/rest-perf-metrics/","title":"REST Perf Metrics","text":"

        This document describes implementation details about ONTAP's REST performance metrics endpoints, including how we built the Harvest RESTPerf collectors.

        Warning

        These are implementation details about ONTAP's REST performance metrics. You do not need to understand any of this to use Harvest. If you want to know how to use or configure Harvest's REST collectors, check out the Rest Collector documentation instead. If you're interested in the gory details, read on.

        "},{"location":"resources/rest-perf-metrics/#introduction","title":"Introduction","text":"

        ONTAP REST metrics were introduced in ONTAP 9.11.1 and included parity with Harvest-collected ZAPI performance metrics by ONTAP 9.12.1.

        "},{"location":"resources/rest-perf-metrics/#performance-rest-queries","title":"Performance REST queries","text":"

        Mapping table

        ZAPI | REST | Comment
        perf-object-counter-list-info | /api/cluster/counter/tables | returns counter tables and schemas
        perf-object-instance-list-info-iter | /api/cluster/counter/tables/{name}/rows | returns instances and counter values
        perf-object-get-instances | /api/cluster/counter/tables/{name}/rows | returns instances and counter values

        Performance REST responses include properties and counters. Counters are metric-like, while properties include instance attributes.

        "},{"location":"resources/rest-perf-metrics/#examples","title":"Examples","text":""},{"location":"resources/rest-perf-metrics/#ask-ontap-for-all-resources-that-report-performance-metrics","title":"Ask ONTAP for all resources that report performance metrics","text":"
        curl 'https://$clusterIP/api/cluster/counter/tables'\n
        Response

        {\n  \"records\": [\n    {\n      \"name\": \"copy_manager\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/copy_manager\"\n        }\n      }\n    },\n    {\n      \"name\": \"copy_manager:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/copy_manager%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"disk\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/disk\"\n        }\n      }\n    },\n    {\n      \"name\": \"disk:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/disk%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"disk:raid_group\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/disk%3Araid_group\"\n        }\n      }\n    },\n    {\n      \"name\": \"external_cache\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/external_cache\"\n        }\n      }\n    },\n    {\n      \"name\": \"fcp\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/fcp\"\n        }\n      }\n    },\n    {\n      \"name\": \"fcp:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/fcp%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"fcp_lif\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/fcp_lif\"\n        }\n      }\n    },\n    {\n      \"name\": \"fcp_lif:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/fcp_lif%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"fcp_lif:port\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/fcp_lif%3Aport\"\n        }\n      }\n    },\n    {\n      \"name\": \"fcp_lif:svm\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/fcp_lif%3Asvm\"\n        }\n      }\n    },\n    {\n      \"name\": \"fcvi\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/fcvi\"\n        }\n      }\n    },\n    {\n      \"name\": \"headroom_aggregate\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/headroom_aggregate\"\n        }\n      }\n    },\n    {\n      \"name\": \"headroom_cpu\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/headroom_cpu\"\n        }\n      }\n    },\n    {\n      \"name\": \"host_adapter\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/host_adapter\"\n        }\n      }\n    },\n    {\n      \"name\": \"iscsi_lif\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/iscsi_lif\"\n        }\n      }\n    },\n    {\n      \"name\": \"iscsi_lif:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/iscsi_lif%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"iscsi_lif:svm\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/iscsi_lif%3Asvm\"\n        }\n      }\n    },\n    {\n      \"name\": \"lif\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/lif\"\n        
}\n      }\n    },\n    {\n      \"name\": \"lif:svm\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/lif%3Asvm\"\n        }\n      }\n    },\n    {\n      \"name\": \"lun\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/lun\"\n        }\n      }\n    },\n    {\n      \"name\": \"lun:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/lun%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"lun:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/lun%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"namespace\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/namespace\"\n        }\n      }\n    },\n    {\n      \"name\": \"namespace:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/namespace%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"nfs_v4_diag\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/nfs_v4_diag\"\n        }\n      }\n    },\n    {\n      \"name\": \"nic_common\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/nic_common\"\n        }\n      }\n    },\n    {\n      \"name\": \"nvmf_lif\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/nvmf_lif\"\n        }\n      }\n    },\n    {\n      \"name\": \"nvmf_lif:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/nvmf_lif%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"nvmf_lif:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/nvmf_lif%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"nvmf_lif:port\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/nvmf_lif%3Aport\"\n        }\n      }\n    },\n    {\n      \"name\": \"object_store_client_op\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/object_store_client_op\"\n        }\n      }\n    },\n    {\n      \"name\": \"path\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/path\"\n        }\n      }\n    },\n    {\n      \"name\": \"processor\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/processor\"\n        }\n      }\n    },\n    {\n      \"name\": \"processor:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/processor%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"qos\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qos\"\n        }\n      }\n    },\n    {\n      \"name\": \"qos:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qos%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"qos:policy_group\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qos%3Apolicy_group\"\n        }\n      }\n    },\n    {\n      \"name\": \"qos_detail\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qos_detail\"\n 
       }\n      }\n    },\n    {\n      \"name\": \"qos_detail_volume\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qos_detail_volume\"\n        }\n      }\n    },\n    {\n      \"name\": \"qos_volume\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qos_volume\"\n        }\n      }\n    },\n    {\n      \"name\": \"qos_volume:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qos_volume%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"qtree\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qtree\"\n        }\n      }\n    },\n    {\n      \"name\": \"qtree:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/qtree%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_cifs\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_cifs\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_cifs:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_cifs%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_cifs:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_cifs%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v3\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v3\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v3:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v3%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v3:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v3%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v4\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v4\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v41\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v41\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v41:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v41%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v41:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v41%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v42\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v42\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v42:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v42%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v42:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v42%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"svm_nfs_v4:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v4%3Aconstituent\"\n        }\n      }\n    
},\n    {\n      \"name\": \"svm_nfs_v4:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/svm_nfs_v4%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"system\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/system\"\n        }\n      }\n    },\n    {\n      \"name\": \"system:constituent\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/system%3Aconstituent\"\n        }\n      }\n    },\n    {\n      \"name\": \"system:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/system%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"token_manager\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/token_manager\"\n        }\n      }\n    },\n    {\n      \"name\": \"volume\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/volume\"\n        }\n      }\n    },\n    {\n      \"name\": \"volume:node\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/volume%3Anode\"\n        }\n      }\n    },\n    {\n      \"name\": \"volume:svm\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/volume%3Asvm\"\n        }\n      }\n    },\n    {\n      \"name\": \"wafl\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/wafl\"\n        }\n      }\n    },\n    {\n      \"name\": \"wafl_comp_aggr_vol_bin\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/wafl_comp_aggr_vol_bin\"\n        }\n      }\n    },\n    {\n      \"name\": \"wafl_hya_per_aggregate\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/wafl_hya_per_aggregate\"\n        }\n      }\n    },\n    {\n      \"name\": \"wafl_hya_sizer\",\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/wafl_hya_sizer\"\n        }\n      }\n    }\n  ],\n  \"num_records\": 71,\n  \"_links\": {\n    \"self\": {\n      \"href\": \"/api/cluster/counter/tables/\"\n    }\n  }\n}\n

        "},{"location":"resources/rest-perf-metrics/#node-performance-metrics-metadata","title":"Node performance metrics metadata","text":"

        Ask ONTAP to return the schema for system:node. The response includes the name, description, type, unit, and (where applicable) denominator for every counter associated with system:node.

        curl 'https://$clusterIP/api/cluster/counter/tables/system:node?return_records=true'\n
        Response

        {\n  \"name\": \"system:node\",\n  \"description\": \"The System table reports general system activity. This includes global throughput for the main services, I/O latency, and CPU activity. The alias name for system:node is system_node.\",\n  \"counter_schemas\": [\n    {\n      \"name\": \"average_processor_busy_percent\",\n      \"description\": \"Average processor utilization across all processors in the system\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"cifs_ops\",\n      \"description\": \"Number of CIFS operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"cp\",\n      \"description\": \"CP time rate\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"cp_time\",\n      \"description\": \"Processor time in CP\",\n      \"type\": \"delta\",\n      \"unit\": \"microsec\"\n    },\n    {\n      \"name\": \"cpu_busy\",\n      \"description\": \"System CPU resource utilization. Returns a computed percentage for the default CPU field. Basically computes a 'cpu usage summary' value which indicates how 'busy' the system is based upon the most heavily utilized domain. The idea is to determine the amount of available CPU until we're limited by either a domain maxing out OR we exhaust all available idle CPU cycles, whichever occurs first.\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"cpu_elapsed_time\",\n      \"description\": \"Elapsed time since boot\",\n      \"type\": \"delta\",\n      \"unit\": \"microsec\"\n    },\n    {\n      \"name\": \"disk_data_read\",\n      \"description\": \"Number of disk kilobytes (KB) read per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"disk_data_written\",\n      \"description\": \"Number of disk kilobytes (KB) written per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"domain_busy\",\n      \"description\": \"Array of processor time in percentage spent in various domains\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"domain_shared\",\n      \"description\": \"Array of processor time in percentage spent in various shared domains\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"dswitchto_cnt\",\n      \"description\": \"Array of processor time in percentage spent in domain switch\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"fcp_data_received\",\n      \"description\": \"Number of FCP kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"fcp_data_sent\",\n      \"description\": \"Number of FCP kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"fcp_ops\",\n      \"description\": \"Number of FCP 
operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"hard_switches\",\n      \"description\": \"Number of context switches per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"hdd_data_read\",\n      \"description\": \"Number of HDD Disk kilobytes (KB) read per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"hdd_data_written\",\n      \"description\": \"Number of HDD kilobytes (KB) written per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"idle\",\n      \"description\": \"Processor idle rate percentage\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"idle_time\",\n      \"description\": \"Processor idle time\",\n      \"type\": \"delta\",\n      \"unit\": \"microsec\"\n    },\n    {\n      \"name\": \"instance_name\",\n      \"description\": \"Node name\",\n      \"type\": \"string\",\n      \"unit\": \"none\"\n    },\n    {\n      \"name\": \"interrupt\",\n      \"description\": \"Processor interrupt rate percentage\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"interrupt_in_cp\",\n      \"description\": \"Processor interrupt rate percentage\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cp_time\"\n      }\n    },\n    {\n      \"name\": \"interrupt_in_cp_time\",\n      \"description\": \"Processor interrupt in CP time\",\n      \"type\": \"delta\",\n      \"unit\": \"microsec\"\n    },\n    {\n      \"name\": \"interrupt_num\",\n      \"description\": \"Processor interrupt number\",\n      \"type\": \"delta\",\n      \"unit\": \"none\"\n    },\n    {\n      \"name\": \"interrupt_num_in_cp\",\n      \"description\": \"Number of processor interrupts in CP\",\n      \"type\": \"delta\",\n      \"unit\": \"none\"\n    },\n    {\n      \"name\": \"interrupt_time\",\n      \"description\": \"Processor interrupt time\",\n      \"type\": \"delta\",\n      \"unit\": \"microsec\"\n    },\n    {\n      \"name\": \"intr_cnt\",\n      \"description\": \"Array of interrupt count per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"intr_cnt_ipi\",\n      \"description\": \"IPI interrupt count per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"intr_cnt_msec\",\n      \"description\": \"Millisecond interrupt count per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"intr_cnt_total\",\n      \"description\": \"Total interrupt count per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"iscsi_data_received\",\n      \"description\": \"iSCSI kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"iscsi_data_sent\",\n      \"description\": \"iSCSI kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"iscsi_ops\",\n      \"description\": \"Number of iSCSI operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n   
 },\n    {\n      \"name\": \"memory\",\n      \"description\": \"Total memory in megabytes (MB)\",\n      \"type\": \"raw\",\n      \"unit\": \"none\"\n    },\n    {\n      \"name\": \"network_data_received\",\n      \"description\": \"Number of network kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"network_data_sent\",\n      \"description\": \"Number of network kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"nfs_ops\",\n      \"description\": \"Number of NFS operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"non_interrupt\",\n      \"description\": \"Processor non-interrupt rate percentage\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"non_interrupt_time\",\n      \"description\": \"Processor non-interrupt time\",\n      \"type\": \"delta\",\n      \"unit\": \"microsec\"\n    },\n    {\n      \"name\": \"num_processors\",\n      \"description\": \"Number of active processors in the system\",\n      \"type\": \"raw\",\n      \"unit\": \"none\"\n    },\n    {\n      \"name\": \"nvme_fc_data_received\",\n      \"description\": \"NVMe/FC kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"nvme_fc_data_sent\",\n      \"description\": \"NVMe/FC kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"nvme_fc_ops\",\n      \"description\": \"NVMe/FC operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"nvme_roce_data_received\",\n      \"description\": \"NVMe/RoCE kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"nvme_roce_data_sent\",\n      \"description\": \"NVMe/RoCE kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"nvme_roce_ops\",\n      \"description\": \"NVMe/RoCE operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"nvme_tcp_data_received\",\n      \"description\": \"NVMe/TCP kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"nvme_tcp_data_sent\",\n      \"description\": \"NVMe/TCP kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"nvme_tcp_ops\",\n      \"description\": \"NVMe/TCP operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"other_data\",\n      \"description\": \"Other throughput\",\n      \"type\": \"rate\",\n      \"unit\": \"b_per_sec\"\n    },\n    {\n      \"name\": \"other_latency\",\n      \"description\": \"Average latency for all other operations in the system in microseconds\",\n      \"type\": \"average\",\n      \"unit\": \"microsec\",\n      \"denominator\": {\n        \"name\": \"other_ops\"\n      }\n    },\n    {\n      \"name\": \"other_ops\",\n      \"description\": \"All other operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n     
 \"name\": \"partner_data_received\",\n      \"description\": \"SCSI Partner kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"partner_data_sent\",\n      \"description\": \"SCSI Partner kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"processor_plevel\",\n      \"description\": \"Processor plevel rate percentage\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"processor_plevel_time\",\n      \"description\": \"Processor plevel rate percentage\",\n      \"type\": \"delta\",\n      \"unit\": \"none\"\n    },\n    {\n      \"name\": \"read_data\",\n      \"description\": \"Read throughput\",\n      \"type\": \"rate\",\n      \"unit\": \"b_per_sec\"\n    },\n    {\n      \"name\": \"read_latency\",\n      \"description\": \"Average latency for all read operations in the system in microseconds\",\n      \"type\": \"average\",\n      \"unit\": \"microsec\",\n      \"denominator\": {\n        \"name\": \"read_ops\"\n      }\n    },\n    {\n      \"name\": \"read_ops\",\n      \"description\": \"Read operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"sk_switches\",\n      \"description\": \"Number of sk switches per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"ssd_data_read\",\n      \"description\": \"Number of SSD Disk kilobytes (KB) read per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"ssd_data_written\",\n      \"description\": \"Number of SSD Disk kilobytes (KB) written per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"sys_read_data\",\n      \"description\": \"Network and FCP kilobytes (KB) received per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"sys_total_data\",\n      \"description\": \"Network and FCP kilobytes (KB) received and sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"sys_write_data\",\n      \"description\": \"Network and FCP kilobytes (KB) sent per second\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"tape_data_read\",\n      \"description\": \"Tape bytes read per millisecond\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"tape_data_written\",\n      \"description\": \"Tape bytes written per millisecond\",\n      \"type\": \"rate\",\n      \"unit\": \"kb_per_sec\"\n    },\n    {\n      \"name\": \"time\",\n      \"description\": \"Time in seconds since the Epoch (00:00:00 UTC January 1 1970)\",\n      \"type\": \"raw\",\n      \"unit\": \"sec\"\n    },\n    {\n      \"name\": \"time_per_interrupt\",\n      \"description\": \"Processor time per interrupt\",\n      \"type\": \"average\",\n      \"unit\": \"microsec\",\n      \"denominator\": {\n        \"name\": \"interrupt_num\"\n      }\n    },\n    {\n      \"name\": \"time_per_interrupt_in_cp\",\n      \"description\": \"Processor time per interrupt in CP\",\n      \"type\": \"average\",\n      \"unit\": \"microsec\",\n      \"denominator\": {\n        \"name\": \"interrupt_num_in_cp\"\n      }\n    
},\n    {\n      \"name\": \"total_data\",\n      \"description\": \"Total throughput in bytes\",\n      \"type\": \"rate\",\n      \"unit\": \"b_per_sec\"\n    },\n    {\n      \"name\": \"total_latency\",\n      \"description\": \"Average latency for all operations in the system in microseconds\",\n      \"type\": \"average\",\n      \"unit\": \"microsec\",\n      \"denominator\": {\n        \"name\": \"total_ops\"\n      }\n    },\n    {\n      \"name\": \"total_ops\",\n      \"description\": \"Total number of operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    },\n    {\n      \"name\": \"total_processor_busy\",\n      \"description\": \"Total processor utilization of all processors in the system\",\n      \"type\": \"percent\",\n      \"unit\": \"percent\",\n      \"denominator\": {\n        \"name\": \"cpu_elapsed_time\"\n      }\n    },\n    {\n      \"name\": \"total_processor_busy_time\",\n      \"description\": \"Total processor time of all processors in the system\",\n      \"type\": \"delta\",\n      \"unit\": \"microsec\"\n    },\n    {\n      \"name\": \"uptime\",\n      \"description\": \"Time in seconds that the system has been up\",\n      \"type\": \"raw\",\n      \"unit\": \"sec\"\n    },\n    {\n      \"name\": \"wafliron\",\n      \"description\": \"Wafliron counters\",\n      \"type\": \"delta\",\n      \"unit\": \"none\"\n    },\n    {\n      \"name\": \"write_data\",\n      \"description\": \"Write throughput\",\n      \"type\": \"rate\",\n      \"unit\": \"b_per_sec\"\n    },\n    {\n      \"name\": \"write_latency\",\n      \"description\": \"Average latency for all write operations in the system in microseconds\",\n      \"type\": \"average\",\n      \"unit\": \"microsec\",\n      \"denominator\": {\n        \"name\": \"write_ops\"\n      }\n    },\n    {\n      \"name\": \"write_ops\",\n      \"description\": \"Write operations per second\",\n      \"type\": \"rate\",\n      \"unit\": \"per_sec\"\n    }\n  ],\n  \"_links\": {\n    \"self\": {\n      \"href\": \"/api/cluster/counter/tables/system:node\"\n    }\n  }\n}\n

        "},{"location":"resources/rest-perf-metrics/#node-performance-metrics-with-all-instances-properties-and-counters","title":"Node performance metrics with all instances, properties, and counters","text":"

        Ask ONTAP to return all instances of system:node. For each system:node instance, include all of that node's properties and performance counters.

        curl 'https://$clusterIP/api/cluster/counter/tables/system:node/rows?fields=*&return_records=true'\n
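        The raw response is reproduced below. As a hedged convenience sketch (cluster address and credentials are placeholders, as in the earlier sketch), the records can be flattened into one dictionary per node: each record carries a "properties" array of name/value pairs and a "counters" array whose entries are either scalar ("value") or arrays (parallel "values"/"labels" lists).

        import requests

        CLUSTER_IP = "10.0.0.1"        # placeholder cluster address
        AUTH = ("admin", "password")   # placeholder credentials

        resp = requests.get(
            f"https://{CLUSTER_IP}/api/cluster/counter/tables/system:node/rows",
            params={"fields": "*", "return_records": "true"},
            auth=AUTH,
            verify=False,
        )
        resp.raise_for_status()

        nodes = {}
        for record in resp.json()["records"]:
            props = {p["name"]: p["value"] for p in record["properties"]}
            counters = {}
            for c in record["counters"]:
                if "values" in c:   # array counter, e.g. domain_busy
                    counters[c["name"]] = dict(zip(c["labels"], c["values"]))
                else:               # scalar counter, e.g. total_ops
                    counters[c["name"]] = c["value"]
            nodes[props["node.name"]] = {"properties": props, "counters": counters}

        for name, data in nodes.items():
            print(name, "total_ops =", data["counters"]["total_ops"])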
        Response

        {\n  \"records\": [\n    {\n      \"counter_table\": {\n        \"name\": \"system:node\"\n      },\n      \"id\": \"umeng-aff300-01:28e14eab-0580-11e8-bd9d-00a098d39e12\",\n      \"properties\": [\n        {\n          \"name\": \"node.name\",\n          \"value\": \"umeng-aff300-01\"\n        },\n        {\n          \"name\": \"system_model\",\n          \"value\": \"AFF-A300\"\n        },\n        {\n          \"name\": \"ontap_version\",\n          \"value\": \"NetApp Release R9.12.1xN_221108_1315: Tue Nov  8 15:32:25 EST 2022 \"\n        },\n        {\n          \"name\": \"compile_flags\",\n          \"value\": \"1\"\n        },\n        {\n          \"name\": \"serial_no\",\n          \"value\": \"721802000260\"\n        },\n        {\n          \"name\": \"system_id\",\n          \"value\": \"0537124012\"\n        },\n        {\n          \"name\": \"hostname\",\n          \"value\": \"umeng-aff300-01\"\n        },\n        {\n          \"name\": \"name\",\n          \"value\": \"umeng-aff300-01\"\n        },\n        {\n          \"name\": \"uuid\",\n          \"value\": \"28e14eab-0580-11e8-bd9d-00a098d39e12\"\n        }\n      ],\n      \"counters\": [\n        {\n          \"name\": \"memory\",\n          \"value\": 88766\n        },\n        {\n          \"name\": \"nfs_ops\",\n          \"value\": 15991465\n        },\n        {\n          \"name\": \"cifs_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"fcp_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"iscsi_ops\",\n          \"value\": 355884195\n        },\n        {\n          \"name\": \"nvme_fc_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_tcp_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_roce_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"network_data_received\",\n          \"value\": 33454266379\n        },\n        {\n          \"name\": \"network_data_sent\",\n          \"value\": 9938586739\n        },\n        {\n          \"name\": \"fcp_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"fcp_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"iscsi_data_received\",\n          \"value\": 4543696942\n        },\n        {\n          \"name\": \"iscsi_data_sent\",\n          \"value\": 3058795391\n        },\n        {\n          \"name\": \"nvme_fc_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_fc_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_tcp_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_tcp_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_roce_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_roce_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"partner_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"partner_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"sys_read_data\",\n          \"value\": 33454266379\n        },\n        {\n          \"name\": \"sys_write_data\",\n          \"value\": 9938586739\n        },\n        {\n          \"name\": \"sys_total_data\",\n          \"value\": 43392853118\n        },\n        {\n          \"name\": \"disk_data_read\",\n  
        \"value\": 32083838540\n        },\n        {\n          \"name\": \"disk_data_written\",\n          \"value\": 21102507352\n        },\n        {\n          \"name\": \"hdd_data_read\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"hdd_data_written\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"ssd_data_read\",\n          \"value\": 32083838540\n        },\n        {\n          \"name\": \"ssd_data_written\",\n          \"value\": 21102507352\n        },\n        {\n          \"name\": \"tape_data_read\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"tape_data_written\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"read_ops\",\n          \"value\": 33495530\n        },\n        {\n          \"name\": \"write_ops\",\n          \"value\": 324699398\n        },\n        {\n          \"name\": \"other_ops\",\n          \"value\": 13680732\n        },\n        {\n          \"name\": \"total_ops\",\n          \"value\": 371875660\n        },\n        {\n          \"name\": \"read_latency\",\n          \"value\": 14728140707\n        },\n        {\n          \"name\": \"write_latency\",\n          \"value\": 1568830328022\n        },\n        {\n          \"name\": \"other_latency\",\n          \"value\": 2132691612\n        },\n        {\n          \"name\": \"total_latency\",\n          \"value\": 1585691160341\n        },\n        {\n          \"name\": \"read_data\",\n          \"value\": 3212301497187\n        },\n        {\n          \"name\": \"write_data\",\n          \"value\": 4787509093524\n        },\n        {\n          \"name\": \"other_data\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"total_data\",\n          \"value\": 7999810590711\n        },\n        {\n          \"name\": \"cpu_busy\",\n          \"value\": 790347800332\n        },\n        {\n          \"name\": \"cpu_elapsed_time\",\n          \"value\": 3979034040025\n        },\n        {\n          \"name\": \"average_processor_busy_percent\",\n          \"value\": 788429907770\n        },\n        {\n          \"name\": \"total_processor_busy\",\n          \"value\": 12614878524320\n        },\n        {\n          \"name\": \"total_processor_busy_time\",\n          \"value\": 12614878524320\n        },\n        {\n          \"name\": \"num_processors\",\n          \"value\": 16\n        },\n        {\n          \"name\": \"interrupt_time\",\n          \"value\": 118435504138\n        },\n        {\n          \"name\": \"interrupt\",\n          \"value\": 118435504138\n        },\n        {\n          \"name\": \"interrupt_num\",\n          \"value\": 1446537540\n        },\n        {\n          \"name\": \"time_per_interrupt\",\n          \"value\": 118435504138\n        },\n        {\n          \"name\": \"non_interrupt_time\",\n          \"value\": 12496443020182\n        },\n        {\n          \"name\": \"non_interrupt\",\n          \"value\": 12496443020182\n        },\n        {\n          \"name\": \"idle_time\",\n          \"value\": 51049666116080\n        },\n        {\n          \"name\": \"idle\",\n          \"value\": 51049666116080\n        },\n        {\n          \"name\": \"cp_time\",\n          \"value\": 221447740301\n        },\n        {\n          \"name\": \"cp\",\n          \"value\": 221447740301\n        },\n        {\n          \"name\": \"interrupt_in_cp_time\",\n          \"value\": 7969316828\n        },\n        {\n          \"name\": 
\"interrupt_in_cp\",\n          \"value\": 7969316828\n        },\n        {\n          \"name\": \"interrupt_num_in_cp\",\n          \"value\": 1639345044\n        },\n        {\n          \"name\": \"time_per_interrupt_in_cp\",\n          \"value\": 7969316828\n        },\n        {\n          \"name\": \"sk_switches\",\n          \"value\": 3830419593\n        },\n        {\n          \"name\": \"hard_switches\",\n          \"value\": 2786999477\n        },\n        {\n          \"name\": \"intr_cnt_msec\",\n          \"value\": 3978648113\n        },\n        {\n          \"name\": \"intr_cnt_ipi\",\n          \"value\": 1709054\n        },\n        {\n          \"name\": \"intr_cnt_total\",\n          \"value\": 1215253490\n        },\n        {\n          \"name\": \"time\",\n          \"value\": 1677516216\n        },\n        {\n          \"name\": \"uptime\",\n          \"value\": 3978648\n        },\n        {\n          \"name\": \"processor_plevel_time\",\n          \"values\": [\n            3405835479577,\n            2628275207938,\n            1916273074545,\n            1366761457118,\n            964863281216,\n            676002919489,\n            472533086045,\n            331487674159,\n            234447654307,\n            167247803300,\n            120098535891,\n            86312126550,\n            61675398266,\n            43549889374,\n            30176461104,\n            19891286233,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"0_CPU\",\n            \"1_CPU\",\n            \"2_CPU\",\n            \"3_CPU\",\n            \"4_CPU\",\n            \"5_CPU\",\n            \"6_CPU\",\n            \"7_CPU\",\n            \"8_CPU\",\n            \"9_CPU\",\n            \"10_CPU\",\n            
\"11_CPU\",\n            \"12_CPU\",\n            \"13_CPU\",\n            \"14_CPU\",\n            \"15_CPU\",\n            \"16_CPU\",\n            \"17_CPU\",\n            \"18_CPU\",\n            \"19_CPU\",\n            \"20_CPU\",\n            \"21_CPU\",\n            \"22_CPU\",\n            \"23_CPU\",\n            \"24_CPU\",\n            \"25_CPU\",\n            \"26_CPU\",\n            \"27_CPU\",\n            \"28_CPU\",\n            \"29_CPU\",\n            \"30_CPU\",\n            \"31_CPU\",\n            \"32_CPU\",\n            \"33_CPU\",\n            \"34_CPU\",\n            \"35_CPU\",\n            \"36_CPU\",\n            \"37_CPU\",\n            \"38_CPU\",\n            \"39_CPU\",\n            \"40_CPU\",\n            \"41_CPU\",\n            \"42_CPU\",\n            \"43_CPU\",\n            \"44_CPU\",\n            \"45_CPU\",\n            \"46_CPU\",\n            \"47_CPU\",\n            \"48_CPU\",\n            \"49_CPU\",\n            \"50_CPU\",\n            \"51_CPU\",\n            \"52_CPU\",\n            \"53_CPU\",\n            \"54_CPU\",\n            \"55_CPU\",\n            \"56_CPU\",\n            \"57_CPU\",\n            \"58_CPU\",\n            \"59_CPU\",\n            \"60_CPU\",\n            \"61_CPU\",\n            \"62_CPU\",\n            \"63_CPU\",\n            \"64_CPU\",\n            \"65_CPU\",\n            \"66_CPU\",\n            \"67_CPU\",\n            \"68_CPU\",\n            \"69_CPU\",\n            \"70_CPU\",\n            \"71_CPU\",\n            \"72_CPU\",\n            \"73_CPU\",\n            \"74_CPU\",\n            \"75_CPU\",\n            \"76_CPU\",\n            \"77_CPU\",\n            \"78_CPU\",\n            \"79_CPU\",\n            \"80_CPU\",\n            \"81_CPU\",\n            \"82_CPU\",\n            \"83_CPU\",\n            \"84_CPU\",\n            \"85_CPU\",\n            \"86_CPU\",\n            \"87_CPU\",\n            \"88_CPU\",\n            \"89_CPU\",\n            \"90_CPU\",\n            \"91_CPU\",\n            \"92_CPU\",\n            \"93_CPU\",\n            \"94_CPU\",\n            \"95_CPU\",\n            \"96_CPU\",\n            \"97_CPU\",\n            \"98_CPU\",\n            \"99_CPU\",\n            \"100_CPU\",\n            \"101_CPU\",\n            \"102_CPU\",\n            \"103_CPU\",\n            \"104_CPU\",\n            \"105_CPU\",\n            \"106_CPU\",\n            \"107_CPU\",\n            \"108_CPU\",\n            \"109_CPU\",\n            \"110_CPU\",\n            \"111_CPU\",\n            \"112_CPU\",\n            \"113_CPU\",\n            \"114_CPU\",\n            \"115_CPU\",\n            \"116_CPU\",\n            \"117_CPU\",\n            \"118_CPU\",\n            \"119_CPU\",\n            \"120_CPU\",\n            \"121_CPU\",\n            \"122_CPU\",\n            \"123_CPU\",\n            \"124_CPU\",\n            \"125_CPU\",\n            \"126_CPU\",\n            \"127_CPU\"\n          ]\n        },\n        {\n          \"name\": \"processor_plevel\",\n          \"values\": [\n            3405835479577,\n            2628275207938,\n            1916273074545,\n            1366761457118,\n            964863281216,\n            676002919489,\n            472533086045,\n            331487674159,\n            234447654307,\n            167247803300,\n            120098535891,\n            86312126550,\n            61675398266,\n            43549889374,\n            30176461104,\n            19891286233,\n            0,\n            0,\n            0,\n            0,\n            
0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"0_CPU\",\n            \"1_CPU\",\n            \"2_CPU\",\n            \"3_CPU\",\n            \"4_CPU\",\n            \"5_CPU\",\n            \"6_CPU\",\n            \"7_CPU\",\n            \"8_CPU\",\n            \"9_CPU\",\n            \"10_CPU\",\n            \"11_CPU\",\n            \"12_CPU\",\n            \"13_CPU\",\n            \"14_CPU\",\n            \"15_CPU\",\n            \"16_CPU\",\n            \"17_CPU\",\n            \"18_CPU\",\n            \"19_CPU\",\n            \"20_CPU\",\n            \"21_CPU\",\n            \"22_CPU\",\n            \"23_CPU\",\n            \"24_CPU\",\n            \"25_CPU\",\n            \"26_CPU\",\n            \"27_CPU\",\n            \"28_CPU\",\n            \"29_CPU\",\n            \"30_CPU\",\n            \"31_CPU\",\n            \"32_CPU\",\n            \"33_CPU\",\n            \"34_CPU\",\n            \"35_CPU\",\n            \"36_CPU\",\n            \"37_CPU\",\n            \"38_CPU\",\n            \"39_CPU\",\n            \"40_CPU\",\n            \"41_CPU\",\n            \"42_CPU\",\n            \"43_CPU\",\n            \"44_CPU\",\n            \"45_CPU\",\n            \"46_CPU\",\n            \"47_CPU\",\n            \"48_CPU\",\n            \"49_CPU\",\n            \"50_CPU\",\n            \"51_CPU\",\n            \"52_CPU\",\n            \"53_CPU\",\n            \"54_CPU\",\n            \"55_CPU\",\n            \"56_CPU\",\n            \"57_CPU\",\n            \"58_CPU\",\n            \"59_CPU\",\n            \"60_CPU\",\n            \"61_CPU\",\n            \"62_CPU\",\n            \"63_CPU\",\n            \"64_CPU\",\n            \"65_CPU\",\n            \"66_CPU\",\n            \"67_CPU\",\n            \"68_CPU\",\n            \"69_CPU\",\n            \"70_CPU\",\n            \"71_CPU\",\n           
 \"72_CPU\",\n            \"73_CPU\",\n            \"74_CPU\",\n            \"75_CPU\",\n            \"76_CPU\",\n            \"77_CPU\",\n            \"78_CPU\",\n            \"79_CPU\",\n            \"80_CPU\",\n            \"81_CPU\",\n            \"82_CPU\",\n            \"83_CPU\",\n            \"84_CPU\",\n            \"85_CPU\",\n            \"86_CPU\",\n            \"87_CPU\",\n            \"88_CPU\",\n            \"89_CPU\",\n            \"90_CPU\",\n            \"91_CPU\",\n            \"92_CPU\",\n            \"93_CPU\",\n            \"94_CPU\",\n            \"95_CPU\",\n            \"96_CPU\",\n            \"97_CPU\",\n            \"98_CPU\",\n            \"99_CPU\",\n            \"100_CPU\",\n            \"101_CPU\",\n            \"102_CPU\",\n            \"103_CPU\",\n            \"104_CPU\",\n            \"105_CPU\",\n            \"106_CPU\",\n            \"107_CPU\",\n            \"108_CPU\",\n            \"109_CPU\",\n            \"110_CPU\",\n            \"111_CPU\",\n            \"112_CPU\",\n            \"113_CPU\",\n            \"114_CPU\",\n            \"115_CPU\",\n            \"116_CPU\",\n            \"117_CPU\",\n            \"118_CPU\",\n            \"119_CPU\",\n            \"120_CPU\",\n            \"121_CPU\",\n            \"122_CPU\",\n            \"123_CPU\",\n            \"124_CPU\",\n            \"125_CPU\",\n            \"126_CPU\",\n            \"127_CPU\"\n          ]\n        },\n        {\n          \"name\": \"domain_busy\",\n          \"values\": [\n            51049666116086,\n            13419960088,\n            13297686377,\n            1735383373870,\n            39183250298,\n            6728050897,\n            28229793795,\n            17493622207,\n            122290467,\n            974721172619,\n            47944793823,\n            164946850,\n            4162377932,\n            407009733276,\n            128199854099,\n            9037374471285,\n            38911301970,\n            366749865,\n            732045734,\n            2997541695,\n            14,\n            18,\n            40\n          ],\n          \"labels\": [\n            \"idle\",\n            \"kahuna\",\n            \"storage\",\n            \"exempt\",\n            \"none\",\n            \"raid\",\n            \"raid_exempt\",\n            \"xor_exempt\",\n            \"target\",\n            \"wafl_exempt\",\n            \"wafl_mpcleaner\",\n            \"sm_exempt\",\n            \"protocol\",\n            \"nwk_exempt\",\n            \"network\",\n            \"hostOS\",\n            \"ssan_exempt\",\n            \"unclassified\",\n            \"kahuna_legacy\",\n            \"ha\",\n            \"ssan_exempt2\",\n            \"exempt_ise\",\n            \"zombie\"\n          ]\n        },\n        {\n          \"name\": \"domain_shared\",\n          \"values\": [\n            0,\n            685164024474,\n            0,\n            0,\n            0,\n            24684879894,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"idle\",\n            \"kahuna\",\n            \"storage\",\n            \"exempt\",\n            \"none\",\n            \"raid\",\n            \"raid_exempt\",\n            \"xor_exempt\",\n            \"target\",\n            \"wafl_exempt\",\n            
\"wafl_mpcleaner\",\n            \"sm_exempt\",\n            \"protocol\",\n            \"nwk_exempt\",\n            \"network\",\n            \"hostOS\",\n            \"ssan_exempt\",\n            \"unclassified\",\n            \"kahuna_legacy\",\n            \"ha\",\n            \"ssan_exempt2\",\n            \"exempt_ise\",\n            \"zombie\"\n          ]\n        },\n        {\n          \"name\": \"dswitchto_cnt\",\n          \"values\": [\n            0,\n            322698663,\n            172936437,\n            446893016,\n            96971,\n            39788918,\n            5,\n            10,\n            10670440,\n            22,\n            7,\n            836,\n            2407967,\n            9798186907,\n            9802868991,\n            265242,\n            53,\n            2614118,\n            4430780,\n            66117706,\n            1,\n            1,\n            1\n          ],\n          \"labels\": [\n            \"idle\",\n            \"kahuna\",\n            \"storage\",\n            \"exempt\",\n            \"none\",\n            \"raid\",\n            \"raid_exempt\",\n            \"xor_exempt\",\n            \"target\",\n            \"wafl_exempt\",\n            \"wafl_mpcleaner\",\n            \"sm_exempt\",\n            \"protocol\",\n            \"nwk_exempt\",\n            \"network\",\n            \"hostOS\",\n            \"ssan_exempt\",\n            \"unclassified\",\n            \"kahuna_legacy\",\n            \"ha\",\n            \"ssan_exempt2\",\n            \"exempt_ise\",\n            \"zombie\"\n          ]\n        },\n        {\n          \"name\": \"intr_cnt\",\n          \"values\": [\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            
4191453008,\n            8181232,\n            1625052957,\n            0,\n            71854,\n            0,\n            71854,\n            0,\n            5,\n            0,\n            5,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"dev_0\",\n            \"dev_1\",\n            \"dev_2\",\n            \"dev_3\",\n            \"dev_4\",\n            \"dev_5\",\n            \"dev_6\",\n            \"dev_7\",\n            \"dev_8\",\n            \"dev_9\",\n            \"dev_10\",\n            \"dev_11\",\n            \"dev_12\",\n            \"dev_13\",\n            \"dev_14\",\n            \"dev_15\",\n            \"dev_16\",\n            \"dev_17\",\n            \"dev_18\",\n            \"dev_19\",\n            \"dev_20\",\n            \"dev_21\",\n            \"dev_22\",\n            \"dev_23\",\n            \"dev_24\",\n            \"dev_25\",\n            \"dev_26\",\n            \"dev_27\",\n            \"dev_28\",\n            \"dev_29\",\n            \"dev_30\",\n            \"dev_31\",\n            \"dev_32\",\n            \"dev_33\",\n            \"dev_34\",\n            \"dev_35\",\n            \"dev_36\",\n            \"dev_37\",\n            \"dev_38\",\n            \"dev_39\",\n            \"dev_40\",\n            \"dev_41\",\n            \"dev_42\",\n            \"dev_43\",\n            \"dev_44\",\n            \"dev_45\",\n            \"dev_46\",\n            \"dev_47\",\n            \"dev_48\",\n            \"dev_49\",\n            
\"dev_50\",\n            \"dev_51\",\n            \"dev_52\",\n            \"dev_53\",\n            \"dev_54\",\n            \"dev_55\",\n            \"dev_56\",\n            \"dev_57\",\n            \"dev_58\",\n            \"dev_59\",\n            \"dev_60\",\n            \"dev_61\",\n            \"dev_62\",\n            \"dev_63\",\n            \"dev_64\",\n            \"dev_65\",\n            \"dev_66\",\n            \"dev_67\",\n            \"dev_68\",\n            \"dev_69\",\n            \"dev_70\",\n            \"dev_71\",\n            \"dev_72\",\n            \"dev_73\",\n            \"dev_74\",\n            \"dev_75\",\n            \"dev_76\",\n            \"dev_77\",\n            \"dev_78\",\n            \"dev_79\",\n            \"dev_80\",\n            \"dev_81\",\n            \"dev_82\",\n            \"dev_83\",\n            \"dev_84\",\n            \"dev_85\",\n            \"dev_86\",\n            \"dev_87\",\n            \"dev_88\",\n            \"dev_89\",\n            \"dev_90\",\n            \"dev_91\",\n            \"dev_92\",\n            \"dev_93\",\n            \"dev_94\",\n            \"dev_95\",\n            \"dev_96\",\n            \"dev_97\",\n            \"dev_98\",\n            \"dev_99\",\n            \"dev_100\",\n            \"dev_101\",\n            \"dev_102\",\n            \"dev_103\",\n            \"dev_104\",\n            \"dev_105\",\n            \"dev_106\",\n            \"dev_107\",\n            \"dev_108\",\n            \"dev_109\",\n            \"dev_110\",\n            \"dev_111\",\n            \"dev_112\",\n            \"dev_113\",\n            \"dev_114\",\n            \"dev_115\",\n            \"dev_116\",\n            \"dev_117\",\n            \"dev_118\",\n            \"dev_119\",\n            \"dev_120\",\n            \"dev_121\",\n            \"dev_122\",\n            \"dev_123\",\n            \"dev_124\",\n            \"dev_125\",\n            \"dev_126\",\n            \"dev_127\",\n            \"dev_128\",\n            \"dev_129\",\n            \"dev_130\",\n            \"dev_131\",\n            \"dev_132\",\n            \"dev_133\",\n            \"dev_134\",\n            \"dev_135\",\n            \"dev_136\",\n            \"dev_137\",\n            \"dev_138\",\n            \"dev_139\",\n            \"dev_140\",\n            \"dev_141\",\n            \"dev_142\",\n            \"dev_143\",\n            \"dev_144\",\n            \"dev_145\",\n            \"dev_146\",\n            \"dev_147\",\n            \"dev_148\",\n            \"dev_149\",\n            \"dev_150\",\n            \"dev_151\",\n            \"dev_152\",\n            \"dev_153\",\n            \"dev_154\",\n            \"dev_155\",\n            \"dev_156\",\n            \"dev_157\",\n            \"dev_158\",\n            \"dev_159\",\n            \"dev_160\",\n            \"dev_161\",\n            \"dev_162\",\n            \"dev_163\",\n            \"dev_164\",\n            \"dev_165\",\n            \"dev_166\",\n            \"dev_167\",\n            \"dev_168\",\n            \"dev_169\",\n            \"dev_170\",\n            \"dev_171\",\n            \"dev_172\",\n            \"dev_173\",\n            \"dev_174\",\n            \"dev_175\",\n            \"dev_176\",\n            \"dev_177\",\n            \"dev_178\",\n            \"dev_179\",\n            \"dev_180\",\n            \"dev_181\",\n            \"dev_182\",\n            \"dev_183\",\n            \"dev_184\",\n            \"dev_185\",\n            \"dev_186\",\n            \"dev_187\",\n            \"dev_188\",\n   
         \"dev_189\",\n            \"dev_190\",\n            \"dev_191\",\n            \"dev_192\",\n            \"dev_193\",\n            \"dev_194\",\n            \"dev_195\",\n            \"dev_196\",\n            \"dev_197\",\n            \"dev_198\",\n            \"dev_199\",\n            \"dev_200\",\n            \"dev_201\",\n            \"dev_202\",\n            \"dev_203\",\n            \"dev_204\",\n            \"dev_205\",\n            \"dev_206\",\n            \"dev_207\",\n            \"dev_208\",\n            \"dev_209\",\n            \"dev_210\",\n            \"dev_211\",\n            \"dev_212\",\n            \"dev_213\",\n            \"dev_214\",\n            \"dev_215\",\n            \"dev_216\",\n            \"dev_217\",\n            \"dev_218\",\n            \"dev_219\",\n            \"dev_220\",\n            \"dev_221\",\n            \"dev_222\",\n            \"dev_223\",\n            \"dev_224\",\n            \"dev_225\",\n            \"dev_226\",\n            \"dev_227\",\n            \"dev_228\",\n            \"dev_229\",\n            \"dev_230\",\n            \"dev_231\",\n            \"dev_232\",\n            \"dev_233\",\n            \"dev_234\",\n            \"dev_235\",\n            \"dev_236\",\n            \"dev_237\",\n            \"dev_238\",\n            \"dev_239\",\n            \"dev_240\",\n            \"dev_241\",\n            \"dev_242\",\n            \"dev_243\",\n            \"dev_244\",\n            \"dev_245\",\n            \"dev_246\",\n            \"dev_247\",\n            \"dev_248\",\n            \"dev_249\",\n            \"dev_250\",\n            \"dev_251\",\n            \"dev_252\",\n            \"dev_253\",\n            \"dev_254\",\n            \"dev_255\"\n          ]\n        },\n        {\n          \"name\": \"wafliron\",\n          \"values\": [\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"iron_totstarts\",\n            \"iron_nobackup\",\n            \"iron_usebackup\"\n          ]\n        }\n      ],\n      \"aggregation\": {\n        \"count\": 2,\n        \"complete\": true\n      },\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/system:node/rows/umeng-aff300-01%3A28e14eab-0580-11e8-bd9d-00a098d39e12\"\n        }\n      }\n    },\n    {\n      \"counter_table\": {\n        \"name\": \"system:node\"\n      },\n      \"id\": \"umeng-aff300-02:1524afca-0580-11e8-ae74-00a098d390f2\",\n      \"properties\": [\n        {\n          \"name\": \"node.name\",\n          \"value\": \"umeng-aff300-02\"\n        },\n        {\n          \"name\": \"system_model\",\n          \"value\": \"AFF-A300\"\n        },\n        {\n          \"name\": \"ontap_version\",\n          \"value\": \"NetApp Release R9.12.1xN_221108_1315: Tue Nov  8 15:32:25 EST 2022 \"\n        },\n        {\n          \"name\": \"compile_flags\",\n          \"value\": \"1\"\n        },\n        {\n          \"name\": \"serial_no\",\n          \"value\": \"721802000259\"\n        },\n        {\n          \"name\": \"system_id\",\n          \"value\": \"0537123843\"\n        },\n        {\n          \"name\": \"hostname\",\n          \"value\": \"umeng-aff300-02\"\n        },\n        {\n          \"name\": \"name\",\n          \"value\": \"umeng-aff300-02\"\n        },\n        {\n          \"name\": \"uuid\",\n          \"value\": \"1524afca-0580-11e8-ae74-00a098d390f2\"\n        }\n      ],\n      \"counters\": [\n        {\n          \"name\": \"memory\",\n          
\"value\": 88766\n        },\n        {\n          \"name\": \"nfs_ops\",\n          \"value\": 2061227971\n        },\n        {\n          \"name\": \"cifs_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"fcp_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"iscsi_ops\",\n          \"value\": 183570559\n        },\n        {\n          \"name\": \"nvme_fc_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_tcp_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_roce_ops\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"network_data_received\",\n          \"value\": 28707362447\n        },\n        {\n          \"name\": \"network_data_sent\",\n          \"value\": 31199786274\n        },\n        {\n          \"name\": \"fcp_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"fcp_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"iscsi_data_received\",\n          \"value\": 2462501728\n        },\n        {\n          \"name\": \"iscsi_data_sent\",\n          \"value\": 962425592\n        },\n        {\n          \"name\": \"nvme_fc_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_fc_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_tcp_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_tcp_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_roce_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"nvme_roce_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"partner_data_received\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"partner_data_sent\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"sys_read_data\",\n          \"value\": 28707362447\n        },\n        {\n          \"name\": \"sys_write_data\",\n          \"value\": 31199786274\n        },\n        {\n          \"name\": \"sys_total_data\",\n          \"value\": 59907148721\n        },\n        {\n          \"name\": \"disk_data_read\",\n          \"value\": 27355740700\n        },\n        {\n          \"name\": \"disk_data_written\",\n          \"value\": 3426898232\n        },\n        {\n          \"name\": \"hdd_data_read\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"hdd_data_written\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"ssd_data_read\",\n          \"value\": 27355740700\n        },\n        {\n          \"name\": \"ssd_data_written\",\n          \"value\": 3426898232\n        },\n        {\n          \"name\": \"tape_data_read\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"tape_data_written\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"read_ops\",\n          \"value\": 29957410\n        },\n        {\n          \"name\": \"write_ops\",\n          \"value\": 2141657620\n        },\n        {\n          \"name\": \"other_ops\",\n          \"value\": 73183500\n        },\n        {\n          \"name\": \"total_ops\",\n          \"value\": 2244798530\n        },\n        {\n          \"name\": \"read_latency\",\n          \"value\": 43283636161\n        },\n        {\n          \"name\": \"write_latency\",\n          \"value\": 1437635703835\n 
       },\n        {\n          \"name\": \"other_latency\",\n          \"value\": 628457365\n        },\n        {\n          \"name\": \"total_latency\",\n          \"value\": 1481547797361\n        },\n        {\n          \"name\": \"read_data\",\n          \"value\": 1908711454978\n        },\n        {\n          \"name\": \"write_data\",\n          \"value\": 23562759645410\n        },\n        {\n          \"name\": \"other_data\",\n          \"value\": 0\n        },\n        {\n          \"name\": \"total_data\",\n          \"value\": 25471471100388\n        },\n        {\n          \"name\": \"cpu_busy\",\n          \"value\": 511050841704\n        },\n        {\n          \"name\": \"cpu_elapsed_time\",\n          \"value\": 3979039364919\n        },\n        {\n          \"name\": \"average_processor_busy_percent\",\n          \"value\": 509151403977\n        },\n        {\n          \"name\": \"total_processor_busy\",\n          \"value\": 8146422463632\n        },\n        {\n          \"name\": \"total_processor_busy_time\",\n          \"value\": 8146422463632\n        },\n        {\n          \"name\": \"num_processors\",\n          \"value\": 16\n        },\n        {\n          \"name\": \"interrupt_time\",\n          \"value\": 108155323601\n        },\n        {\n          \"name\": \"interrupt\",\n          \"value\": 108155323601\n        },\n        {\n          \"name\": \"interrupt_num\",\n          \"value\": 3369179127\n        },\n        {\n          \"name\": \"time_per_interrupt\",\n          \"value\": 108155323601\n        },\n        {\n          \"name\": \"non_interrupt_time\",\n          \"value\": 8038267140031\n        },\n        {\n          \"name\": \"non_interrupt\",\n          \"value\": 8038267140031\n        },\n        {\n          \"name\": \"idle_time\",\n          \"value\": 55518207375072\n        },\n        {\n          \"name\": \"idle\",\n          \"value\": 55518207375072\n        },\n        {\n          \"name\": \"cp_time\",\n          \"value\": 64306316680\n        },\n        {\n          \"name\": \"cp\",\n          \"value\": 64306316680\n        },\n        {\n          \"name\": \"interrupt_in_cp_time\",\n          \"value\": 2024956616\n        },\n        {\n          \"name\": \"interrupt_in_cp\",\n          \"value\": 2024956616\n        },\n        {\n          \"name\": \"interrupt_num_in_cp\",\n          \"value\": 2661183541\n        },\n        {\n          \"name\": \"time_per_interrupt_in_cp\",\n          \"value\": 2024956616\n        },\n        {\n          \"name\": \"sk_switches\",\n          \"value\": 2798598514\n        },\n        {\n          \"name\": \"hard_switches\",\n          \"value\": 1354185066\n        },\n        {\n          \"name\": \"intr_cnt_msec\",\n          \"value\": 3978642246\n        },\n        {\n          \"name\": \"intr_cnt_ipi\",\n          \"value\": 797281\n        },\n        {\n          \"name\": \"intr_cnt_total\",\n          \"value\": 905575861\n        },\n        {\n          \"name\": \"time\",\n          \"value\": 1677516216\n        },\n        {\n          \"name\": \"uptime\",\n          \"value\": 3978643\n        },\n        {\n          \"name\": \"processor_plevel_time\",\n          \"values\": [\n            2878770221447,\n            1882901052733,\n            1209134416474,\n            771086627192,\n            486829133301,\n            306387520688,\n            193706139760,\n            123419519944,\n            79080346535,\n            
50459518003,\n            31714732122,\n            19476561954,\n            11616026278,\n            6666253598,\n            3623880168,\n            1790458071,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"0_CPU\",\n            \"1_CPU\",\n            \"2_CPU\",\n            \"3_CPU\",\n            \"4_CPU\",\n            \"5_CPU\",\n            \"6_CPU\",\n            \"7_CPU\",\n            \"8_CPU\",\n            \"9_CPU\",\n            \"10_CPU\",\n            \"11_CPU\",\n            \"12_CPU\",\n            \"13_CPU\",\n            \"14_CPU\",\n            \"15_CPU\",\n            \"16_CPU\",\n            \"17_CPU\",\n            \"18_CPU\",\n            \"19_CPU\",\n            \"20_CPU\",\n            \"21_CPU\",\n            \"22_CPU\",\n            \"23_CPU\",\n            \"24_CPU\",\n            \"25_CPU\",\n            \"26_CPU\",\n            \"27_CPU\",\n            \"28_CPU\",\n            \"29_CPU\",\n            \"30_CPU\",\n            \"31_CPU\",\n            \"32_CPU\",\n            \"33_CPU\",\n            \"34_CPU\",\n            \"35_CPU\",\n            \"36_CPU\",\n            \"37_CPU\",\n            \"38_CPU\",\n            \"39_CPU\",\n            \"40_CPU\",\n            \"41_CPU\",\n            \"42_CPU\",\n            \"43_CPU\",\n            \"44_CPU\",\n            \"45_CPU\",\n            \"46_CPU\",\n            \"47_CPU\",\n            \"48_CPU\",\n            \"49_CPU\",\n            \"50_CPU\",\n            \"51_CPU\",\n            \"52_CPU\",\n            \"53_CPU\",\n            \"54_CPU\",\n            \"55_CPU\",\n            \"56_CPU\",\n            \"57_CPU\",\n            \"58_CPU\",\n            \"59_CPU\",\n            \"60_CPU\",\n            \"61_CPU\",\n            
\"62_CPU\",\n            \"63_CPU\",\n            \"64_CPU\",\n            \"65_CPU\",\n            \"66_CPU\",\n            \"67_CPU\",\n            \"68_CPU\",\n            \"69_CPU\",\n            \"70_CPU\",\n            \"71_CPU\",\n            \"72_CPU\",\n            \"73_CPU\",\n            \"74_CPU\",\n            \"75_CPU\",\n            \"76_CPU\",\n            \"77_CPU\",\n            \"78_CPU\",\n            \"79_CPU\",\n            \"80_CPU\",\n            \"81_CPU\",\n            \"82_CPU\",\n            \"83_CPU\",\n            \"84_CPU\",\n            \"85_CPU\",\n            \"86_CPU\",\n            \"87_CPU\",\n            \"88_CPU\",\n            \"89_CPU\",\n            \"90_CPU\",\n            \"91_CPU\",\n            \"92_CPU\",\n            \"93_CPU\",\n            \"94_CPU\",\n            \"95_CPU\",\n            \"96_CPU\",\n            \"97_CPU\",\n            \"98_CPU\",\n            \"99_CPU\",\n            \"100_CPU\",\n            \"101_CPU\",\n            \"102_CPU\",\n            \"103_CPU\",\n            \"104_CPU\",\n            \"105_CPU\",\n            \"106_CPU\",\n            \"107_CPU\",\n            \"108_CPU\",\n            \"109_CPU\",\n            \"110_CPU\",\n            \"111_CPU\",\n            \"112_CPU\",\n            \"113_CPU\",\n            \"114_CPU\",\n            \"115_CPU\",\n            \"116_CPU\",\n            \"117_CPU\",\n            \"118_CPU\",\n            \"119_CPU\",\n            \"120_CPU\",\n            \"121_CPU\",\n            \"122_CPU\",\n            \"123_CPU\",\n            \"124_CPU\",\n            \"125_CPU\",\n            \"126_CPU\",\n            \"127_CPU\"\n          ]\n        },\n        {\n          \"name\": \"processor_plevel\",\n          \"values\": [\n            2878770221447,\n            1882901052733,\n            1209134416474,\n            771086627192,\n            486829133301,\n            306387520688,\n            193706139760,\n            123419519944,\n            79080346535,\n            50459518003,\n            31714732122,\n            19476561954,\n            11616026278,\n            6666253598,\n            3623880168,\n            1790458071,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n  
          0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"0_CPU\",\n            \"1_CPU\",\n            \"2_CPU\",\n            \"3_CPU\",\n            \"4_CPU\",\n            \"5_CPU\",\n            \"6_CPU\",\n            \"7_CPU\",\n            \"8_CPU\",\n            \"9_CPU\",\n            \"10_CPU\",\n            \"11_CPU\",\n            \"12_CPU\",\n            \"13_CPU\",\n            \"14_CPU\",\n            \"15_CPU\",\n            \"16_CPU\",\n            \"17_CPU\",\n            \"18_CPU\",\n            \"19_CPU\",\n            \"20_CPU\",\n            \"21_CPU\",\n            \"22_CPU\",\n            \"23_CPU\",\n            \"24_CPU\",\n            \"25_CPU\",\n            \"26_CPU\",\n            \"27_CPU\",\n            \"28_CPU\",\n            \"29_CPU\",\n            \"30_CPU\",\n            \"31_CPU\",\n            \"32_CPU\",\n            \"33_CPU\",\n            \"34_CPU\",\n            \"35_CPU\",\n            \"36_CPU\",\n            \"37_CPU\",\n            \"38_CPU\",\n            \"39_CPU\",\n            \"40_CPU\",\n            \"41_CPU\",\n            \"42_CPU\",\n            \"43_CPU\",\n            \"44_CPU\",\n            \"45_CPU\",\n            \"46_CPU\",\n            \"47_CPU\",\n            \"48_CPU\",\n            \"49_CPU\",\n            \"50_CPU\",\n            \"51_CPU\",\n            \"52_CPU\",\n            \"53_CPU\",\n            \"54_CPU\",\n            \"55_CPU\",\n            \"56_CPU\",\n            \"57_CPU\",\n            \"58_CPU\",\n            \"59_CPU\",\n            \"60_CPU\",\n            \"61_CPU\",\n            \"62_CPU\",\n            \"63_CPU\",\n            \"64_CPU\",\n            \"65_CPU\",\n            \"66_CPU\",\n            \"67_CPU\",\n            \"68_CPU\",\n            \"69_CPU\",\n            \"70_CPU\",\n            \"71_CPU\",\n            \"72_CPU\",\n            \"73_CPU\",\n            \"74_CPU\",\n            \"75_CPU\",\n            \"76_CPU\",\n            \"77_CPU\",\n            \"78_CPU\",\n            \"79_CPU\",\n            \"80_CPU\",\n            \"81_CPU\",\n            \"82_CPU\",\n            \"83_CPU\",\n            \"84_CPU\",\n            \"85_CPU\",\n            \"86_CPU\",\n            \"87_CPU\",\n            \"88_CPU\",\n            \"89_CPU\",\n            \"90_CPU\",\n            \"91_CPU\",\n            \"92_CPU\",\n            \"93_CPU\",\n            \"94_CPU\",\n            \"95_CPU\",\n            \"96_CPU\",\n            \"97_CPU\",\n            \"98_CPU\",\n            \"99_CPU\",\n            \"100_CPU\",\n            \"101_CPU\",\n            \"102_CPU\",\n            \"103_CPU\",\n            \"104_CPU\",\n            \"105_CPU\",\n            \"106_CPU\",\n            \"107_CPU\",\n            \"108_CPU\",\n            \"109_CPU\",\n            \"110_CPU\",\n            \"111_CPU\",\n            \"112_CPU\",\n            \"113_CPU\",\n            \"114_CPU\",\n            \"115_CPU\",\n            \"116_CPU\",\n            \"117_CPU\",\n            \"118_CPU\",\n            \"119_CPU\",\n            \"120_CPU\",\n            \"121_CPU\",\n            
\"122_CPU\",\n            \"123_CPU\",\n            \"124_CPU\",\n            \"125_CPU\",\n            \"126_CPU\",\n            \"127_CPU\"\n          ]\n        },\n        {\n          \"name\": \"domain_busy\",\n          \"values\": [\n            55518207375080,\n            8102895398,\n            12058227646,\n            991838747162,\n            28174147737,\n            6669066926,\n            14245801778,\n            9009875224,\n            118982762,\n            177496844302,\n            5888814259,\n            167280195,\n            3851617905,\n            484154906167,\n            91240285306,\n            6180138216837,\n            22111798640,\n            344700584,\n            266304074,\n            2388625825,\n            16,\n            21,\n            19\n          ],\n          \"labels\": [\n            \"idle\",\n            \"kahuna\",\n            \"storage\",\n            \"exempt\",\n            \"none\",\n            \"raid\",\n            \"raid_exempt\",\n            \"xor_exempt\",\n            \"target\",\n            \"wafl_exempt\",\n            \"wafl_mpcleaner\",\n            \"sm_exempt\",\n            \"protocol\",\n            \"nwk_exempt\",\n            \"network\",\n            \"hostOS\",\n            \"ssan_exempt\",\n            \"unclassified\",\n            \"kahuna_legacy\",\n            \"ha\",\n            \"ssan_exempt2\",\n            \"exempt_ise\",\n            \"zombie\"\n          ]\n        },\n        {\n          \"name\": \"domain_shared\",\n          \"values\": [\n            0,\n            153663450171,\n            0,\n            0,\n            0,\n            11834112384,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"idle\",\n            \"kahuna\",\n            \"storage\",\n            \"exempt\",\n            \"none\",\n            \"raid\",\n            \"raid_exempt\",\n            \"xor_exempt\",\n            \"target\",\n            \"wafl_exempt\",\n            \"wafl_mpcleaner\",\n            \"sm_exempt\",\n            \"protocol\",\n            \"nwk_exempt\",\n            \"network\",\n            \"hostOS\",\n            \"ssan_exempt\",\n            \"unclassified\",\n            \"kahuna_legacy\",\n            \"ha\",\n            \"ssan_exempt2\",\n            \"exempt_ise\",\n            \"zombie\"\n          ]\n        },\n        {\n          \"name\": \"dswitchto_cnt\",\n          \"values\": [\n            0,\n            178192633,\n            143964155,\n            286324250,\n            2365,\n            39684121,\n            5,\n            10,\n            10715325,\n            22,\n            7,\n            30,\n            2407970,\n            7865489299,\n            7870331008,\n            265242,\n            53,\n            2535145,\n            3252888,\n            53334340,\n            1,\n            1,\n            1\n          ],\n          \"labels\": [\n            \"idle\",\n            \"kahuna\",\n            \"storage\",\n            \"exempt\",\n            \"none\",\n            \"raid\",\n            \"raid_exempt\",\n            \"xor_exempt\",\n            \"target\",\n            \"wafl_exempt\",\n            \"wafl_mpcleaner\",\n            \"sm_exempt\",\n            
\"protocol\",\n            \"nwk_exempt\",\n            \"network\",\n            \"hostOS\",\n            \"ssan_exempt\",\n            \"unclassified\",\n            \"kahuna_legacy\",\n            \"ha\",\n            \"ssan_exempt2\",\n            \"exempt_ise\",\n            \"zombie\"\n          ]\n        },\n        {\n          \"name\": \"intr_cnt\",\n          \"values\": [\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            724698481,\n            8181275,\n            488080162,\n            0,\n            71856,\n            0,\n            71856,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            
0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"dev_0\",\n            \"dev_1\",\n            \"dev_2\",\n            \"dev_3\",\n            \"dev_4\",\n            \"dev_5\",\n            \"dev_6\",\n            \"dev_7\",\n            \"dev_8\",\n            \"dev_9\",\n            \"dev_10\",\n            \"dev_11\",\n            \"dev_12\",\n            \"dev_13\",\n            \"dev_14\",\n            \"dev_15\",\n            \"dev_16\",\n            \"dev_17\",\n            \"dev_18\",\n            \"dev_19\",\n            \"dev_20\",\n            \"dev_21\",\n            \"dev_22\",\n            \"dev_23\",\n            \"dev_24\",\n            \"dev_25\",\n            \"dev_26\",\n            \"dev_27\",\n            \"dev_28\",\n            \"dev_29\",\n            \"dev_30\",\n            \"dev_31\",\n            \"dev_32\",\n            \"dev_33\",\n            \"dev_34\",\n            \"dev_35\",\n            \"dev_36\",\n            \"dev_37\",\n            \"dev_38\",\n            \"dev_39\",\n            \"dev_40\",\n            \"dev_41\",\n            \"dev_42\",\n            \"dev_43\",\n            \"dev_44\",\n            \"dev_45\",\n            \"dev_46\",\n            \"dev_47\",\n            \"dev_48\",\n            \"dev_49\",\n            \"dev_50\",\n            \"dev_51\",\n            \"dev_52\",\n            \"dev_53\",\n            \"dev_54\",\n            \"dev_55\",\n            \"dev_56\",\n            \"dev_57\",\n            \"dev_58\",\n            \"dev_59\",\n            \"dev_60\",\n            \"dev_61\",\n            \"dev_62\",\n            \"dev_63\",\n            \"dev_64\",\n            \"dev_65\",\n            \"dev_66\",\n            \"dev_67\",\n            \"dev_68\",\n            \"dev_69\",\n            \"dev_70\",\n            \"dev_71\",\n            \"dev_72\",\n            \"dev_73\",\n            \"dev_74\",\n            \"dev_75\",\n            \"dev_76\",\n            \"dev_77\",\n            \"dev_78\",\n            \"dev_79\",\n            \"dev_80\",\n            \"dev_81\",\n            \"dev_82\",\n            \"dev_83\",\n            \"dev_84\",\n            \"dev_85\",\n            \"dev_86\",\n            \"dev_87\",\n            \"dev_88\",\n            \"dev_89\",\n            \"dev_90\",\n            \"dev_91\",\n            \"dev_92\",\n            \"dev_93\",\n            \"dev_94\",\n            \"dev_95\",\n            \"dev_96\",\n            \"dev_97\",\n            \"dev_98\",\n            \"dev_99\",\n            \"dev_100\",\n            \"dev_101\",\n           
 \"dev_102\",\n            \"dev_103\",\n            \"dev_104\",\n            \"dev_105\",\n            \"dev_106\",\n            \"dev_107\",\n            \"dev_108\",\n            \"dev_109\",\n            \"dev_110\",\n            \"dev_111\",\n            \"dev_112\",\n            \"dev_113\",\n            \"dev_114\",\n            \"dev_115\",\n            \"dev_116\",\n            \"dev_117\",\n            \"dev_118\",\n            \"dev_119\",\n            \"dev_120\",\n            \"dev_121\",\n            \"dev_122\",\n            \"dev_123\",\n            \"dev_124\",\n            \"dev_125\",\n            \"dev_126\",\n            \"dev_127\",\n            \"dev_128\",\n            \"dev_129\",\n            \"dev_130\",\n            \"dev_131\",\n            \"dev_132\",\n            \"dev_133\",\n            \"dev_134\",\n            \"dev_135\",\n            \"dev_136\",\n            \"dev_137\",\n            \"dev_138\",\n            \"dev_139\",\n            \"dev_140\",\n            \"dev_141\",\n            \"dev_142\",\n            \"dev_143\",\n            \"dev_144\",\n            \"dev_145\",\n            \"dev_146\",\n            \"dev_147\",\n            \"dev_148\",\n            \"dev_149\",\n            \"dev_150\",\n            \"dev_151\",\n            \"dev_152\",\n            \"dev_153\",\n            \"dev_154\",\n            \"dev_155\",\n            \"dev_156\",\n            \"dev_157\",\n            \"dev_158\",\n            \"dev_159\",\n            \"dev_160\",\n            \"dev_161\",\n            \"dev_162\",\n            \"dev_163\",\n            \"dev_164\",\n            \"dev_165\",\n            \"dev_166\",\n            \"dev_167\",\n            \"dev_168\",\n            \"dev_169\",\n            \"dev_170\",\n            \"dev_171\",\n            \"dev_172\",\n            \"dev_173\",\n            \"dev_174\",\n            \"dev_175\",\n            \"dev_176\",\n            \"dev_177\",\n            \"dev_178\",\n            \"dev_179\",\n            \"dev_180\",\n            \"dev_181\",\n            \"dev_182\",\n            \"dev_183\",\n            \"dev_184\",\n            \"dev_185\",\n            \"dev_186\",\n            \"dev_187\",\n            \"dev_188\",\n            \"dev_189\",\n            \"dev_190\",\n            \"dev_191\",\n            \"dev_192\",\n            \"dev_193\",\n            \"dev_194\",\n            \"dev_195\",\n            \"dev_196\",\n            \"dev_197\",\n            \"dev_198\",\n            \"dev_199\",\n            \"dev_200\",\n            \"dev_201\",\n            \"dev_202\",\n            \"dev_203\",\n            \"dev_204\",\n            \"dev_205\",\n            \"dev_206\",\n            \"dev_207\",\n            \"dev_208\",\n            \"dev_209\",\n            \"dev_210\",\n            \"dev_211\",\n            \"dev_212\",\n            \"dev_213\",\n            \"dev_214\",\n            \"dev_215\",\n            \"dev_216\",\n            \"dev_217\",\n            \"dev_218\",\n            \"dev_219\",\n            \"dev_220\",\n            \"dev_221\",\n            \"dev_222\",\n            \"dev_223\",\n            \"dev_224\",\n            \"dev_225\",\n            \"dev_226\",\n            \"dev_227\",\n            \"dev_228\",\n            \"dev_229\",\n            \"dev_230\",\n            \"dev_231\",\n            \"dev_232\",\n            \"dev_233\",\n            \"dev_234\",\n            \"dev_235\",\n            \"dev_236\",\n            \"dev_237\",\n            \"dev_238\",\n    
        \"dev_239\",\n            \"dev_240\",\n            \"dev_241\",\n            \"dev_242\",\n            \"dev_243\",\n            \"dev_244\",\n            \"dev_245\",\n            \"dev_246\",\n            \"dev_247\",\n            \"dev_248\",\n            \"dev_249\",\n            \"dev_250\",\n            \"dev_251\",\n            \"dev_252\",\n            \"dev_253\",\n            \"dev_254\",\n            \"dev_255\"\n          ]\n        },\n        {\n          \"name\": \"wafliron\",\n          \"values\": [\n            0,\n            0,\n            0\n          ],\n          \"labels\": [\n            \"iron_totstarts\",\n            \"iron_nobackup\",\n            \"iron_usebackup\"\n          ]\n        }\n      ],\n      \"aggregation\": {\n        \"count\": 2,\n        \"complete\": true\n      },\n      \"_links\": {\n        \"self\": {\n          \"href\": \"/api/cluster/counter/tables/system:node/rows/umeng-aff300-02%3A1524afca-0580-11e8-ae74-00a098d390f2\"\n        }\n      }\n    }\n  ],\n  \"num_records\": 2,\n  \"_links\": {\n    \"self\": {\n      \"href\": \"/api/cluster/counter/tables/system:node/rows?fields=*&return_records=true\"\n    }\n  }\n}\n

        "},{"location":"resources/rest-perf-metrics/#references","title":"References","text":"
        • Harvest REST Strategy
        • ONTAP 9.11.1 ONTAPI-to-REST Counter Manager Mapping
        • ONTAP REST API reference documentation
        • ONTAP REST API
        "},{"location":"resources/templates-and-metrics/","title":"Harvest Templates and Metrics","text":"

        Harvest collects ONTAP counter information, augments it, and stores it in a time-series DB. Refer to ONTAP Metrics for details about the ONTAP metrics exposed by Harvest.

        flowchart RL\n    Harvest[Harvest<br>Get & Augment] -- REST<br>ZAPI --> ONTAP\n    id1[(Prometheus<br>Store)] -- Scrape --> Harvest

        Three concepts work in unison to collect ONTAP metrics data, prepare it and make it available to Prometheus.

        • ZAPI/REST
        • Harvest templates
        • Exporters
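
        These pieces are wired together in your harvest.yml: each poller lists the collectors it runs and the exporters it publishes to. The fragment below is a minimal sketch rather than a complete file; the poller name u2, datacenter dc-1, and port 14002 are reused from the examples on this page, while the cluster address is an assumption.

        # harvest.yml (sketch)\nExporters:\n  prometheus:\n    exporter: Prometheus\n    port: 14002\n\nPollers:\n  u2:                    # poller name, used below as bin/harvest zapi -p u2\n    datacenter: dc-1\n    addr: 10.0.0.1       # assumed cluster management address\n    collectors:\n      - Zapi             # or Rest\n    exporters:\n      - prometheus\n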

        We're going to walk through an example from a running system, focusing on the disk object.

        At a high level, Harvest templates describe which ZAPIs to send to ONTAP and how to interpret the responses.

        • ONTAP defines two ZAPIs to collect disk info
          • Config information is collected via storage-disk-get-iter
          • Performance counters are collected via disk:constituent
        • These ZAPIs are found in their corresponding object template file conf/zapi/cdot/9.8.0/disk.yaml and conf/zapiperf/cdot/9.8.0/disk.yaml. These files also describe how to map the ZAPI responses into a time-series-friendly format
        • Prometheus uniquely identifies a time series by its metric name and optional key-value pairs called labels.
        "},{"location":"resources/templates-and-metrics/#handy-tools","title":"Handy Tools","text":"
        • dasel is useful to convert between XML, YAML, JSON, etc. We'll use it to make displaying some of the data easier.
        "},{"location":"resources/templates-and-metrics/#ontap-zapi-disk-example","title":"ONTAP ZAPI disk example","text":"

        We'll use the bin/harvest zapi tool to interrogate the cluster and gather information about the counters. This is one way you can send ZAPIs to ONTAP and explore the return types and values.

        bin/harvest zapi -p u2 show attrs --api storage-disk-get-iter\n

        Output edited for brevity and line numbers added on the left

        The hierarchy and return type of each counter are shown below. We'll use this hierarchy to build a matching Harvest template. For example, line 3 is the bytes-per-sector counter, which has an integer value and is a child of storage-disk-info > disk-inventory-info.

        To capture that counter's value as a metric in Harvest, the ZAPI template must use the same hierarchical path. The matching path is shown below.

        building tree for attribute [attributes-list] => [storage-disk-info]\n\n 1 [storage-disk-info]            -               *\n 2   [disk-inventory-info]        -                \n 3     [bytes-per-sector]         -         integer\n 4     [capacity-sectors]         -         integer\n 5     [disk-type]                -          string\n 6     [is-shared]                -         boolean\n 7     [model]                    -          string\n 8     [serial-number]            -          string\n 9     [shelf]                    -          string\n10     [shelf-bay]                -          string\n11   [disk-name]                  -          string\n12   [disk-ownership-info]        -                \n13     [home-node-name]           -          string\n14     [is-failed]                -         boolean\n15     [owner-node-name]          -          string\n16   [disk-raid-info]             -                \n17     [container-type]           -          string\n18     [disk-outage-info]         -                \n19       [is-in-fdr]              -         boolean\n20       [reason]                 -          string  \n21   [disk-stats-info]            -                \n22     [average-latency]          -         integer\n23     [disk-io-kbps]             -         integer\n24     [power-on-time-interval]   -         integer\n25     [sectors-read]             -         integer\n26     [sectors-written]          -         integer\n27   [disk-uid]                   -          string\n28   [node-name]                  -          string\n29   [storage-disk-state]         -         integer\n30   [storage-disk-state-flags]   -         integer\n
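
        For example, to capture the bytes-per-sector counter from line 3, the counters section of the template mirrors the storage-disk-info > disk-inventory-info > bytes-per-sector hierarchy. This is only a fragment; the full disk template appears in the Harvest Object Template section below.

        counters:\n  storage-disk-info:                           # line 1 of the ZAPI output above\n    - disk-inventory-info:                     # line 2\n      - bytes-per-sector => bytes_per_sector   # line 3; exported as disk_bytes_per_sector\n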
        "},{"location":"resources/templates-and-metrics/#harvest-templates","title":"Harvest Templates","text":"

        To understand templates, there are a few concepts to cover:

        There are three kinds of information included in templates that define what Harvest collects and exports:

        1. Configuration information is exported into the _labels metric (e.g. disk_labels, see below)
        2. Metrics data is exported as disk_\"metric name\" e.g. disk_bytes_per_sector, disk_sectors, etc. Metrics are leaf nodes that are not prefixed with a ^ or ^^. Metrics must be one of the number types: float or int.
        3. Plugins may add additional metrics, increasing the number of metrics exported in #2

        A resource will typically have multiple instances. Using disk as an example, that means there will be one disk_labels and a metric row per instance. If we have 24 disks and the disk template lists seven metrics to capture, Harvest will export a total of 192 rows of Prometheus data.

        24 instances * (7 metrics per instance + 1 label per instance) = 192 rows

        Sum of disk metrics that Harvest exports

        curl -s 'http://localhost:14002/metrics' | grep ^disk | cut -d'{' -f1 | sort | uniq -c\n  24 disk_bytes_per_sector\n  24 disk_labels\n  24 disk_sectors\n  24 disk_stats_average_latency\n  24 disk_stats_io_kbps\n  24 disk_stats_sectors_read\n  24 disk_stats_sectors_written\n  24 disk_uptime\n# 192 rows \n

        Read on to see how we control which labels from #1 and which metrics from #2 are included in the exported data.

        "},{"location":"resources/templates-and-metrics/#instance-keys-and-labels","title":"Instance Keys and Labels","text":"
        • Instance key - An instance key defines the set of attributes Harvest uses to construct a key that uniquely identifies an object. For example, the disk template uses the node + disk attributes to determine uniqueness. Using node or disk alone wouldn't be sufficient since disks on separate nodes can have the same name. If a single label does not uniquely identify an instance, combine multiple keys for uniqueness. Instance keys must refer to attributes that are of type string.

        Because instance keys define uniqueness, these keys are also added to each metric as a key-value pair. (See Control What Labels and Metrics are Exported for examples.)

        • Instance label - Labels are key-value pairs used to gather configuration information about each instance. All of the key-value pairs are combined into a single metric named disk_labels. There will be one disk_labels for each monitored instance. Here's an example reformatted so it's easier to read:
        disk_labels{\n  datacenter=\"dc-1\",\n  cluster=\"umeng-aff300-05-06\",\n  node=\"umeng-aff300-06\",\n  disk=\"1.1.23\",\n  type=\"SSD\",\n  model=\"X371_S1643960ATE\",\n  outage=\"\",\n  owner_node=\"umeng-aff300-06\",\n  shared=\"true\",\n  shelf=\"1\",\n  shelf_bay=\"23\",\n  serial_number=\"S3SENE0K500532\",\n  failed=\"false\",\n  container_type=\"shared\"\n}\n
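
        One common way to use a _labels metric in Prometheus is to join it onto a numeric metric so the configuration labels become available for filtering and grouping. The query below is a sketch using metric and label names from the disk examples on this page; trim the label lists to suit your needs.

        # copy model, shelf, and shelf_bay from disk_labels onto the latency metric\ndisk_stats_average_latency\n  * on (datacenter, cluster, node, disk) group_left (model, shelf, shelf_bay)\n    disk_labels\n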
        "},{"location":"resources/templates-and-metrics/#harvest-object-template","title":"Harvest Object Template","text":"

        Continuing with the disk example, below is the conf/zapi/cdot/9.8.0/disk.yaml that tells Harvest which ZAPI to send to ONTAP (storage-disk-get-iter) and describes how to interpret and export the response.

        • Line 1 defines the name of this resource and must exactly match the object defined in your default.yaml or custom.yaml file. For example:
        # default.yaml\nobjects:\n  Disk:  disk.yaml\n
        • Line 2 is the name of the ZAPI that Harvest will send to collect disk resources
        • Line 3 is the prefix used to export metrics associated with this object, i.e. all metrics will be of the form disk_*
        • Line 5: the counters section is where we define the metrics, labels, and what constitutes instance uniqueness
        • Line 7: the double-hat prefix ^^ marks this attribute as an instance key used to determine uniqueness. Instance keys are also included as labels. UUIDs are good choices for uniqueness.
        • Line 13: the single-hat prefix ^ means this attribute should be stored as a label. That means we can include it in the export_options section as one of the key-value pairs in disk_labels
        • Lines 10, 11, 23, 24, 25, 26, 27 are the metric rows. Metrics are leaf nodes that are not prefixed with a ^ or ^^. If you refer back to the ONTAP ZAPI disk example above, you'll notice each of these attributes is an integer type.
        • Line 43 defines the set of labels to use when constructing the disk_labels metrics. As mentioned above, these labels capture config-related attributes per instance.

        Output edited for brevity and line numbers added for reference.

         1  name:             Disk\n 2  query:            storage-disk-get-iter\n 3  object:           disk\n 4  \n 5  counters:\n 6    storage-disk-info:\n 7      - ^^disk-uid\n 8      - ^^disk-name               => disk\n 9      - disk-inventory-info:\n10        - bytes-per-sector        => bytes_per_sector        # notice this has the same hierarchical path we saw from bin/harvest zapi\n11        - capacity-sectors        => sectors\n12        - ^disk-type              => type\n13        - ^is-shared              => shared\n14        - ^model                  => model\n15        - ^serial-number          => serial_number\n16        - ^shelf                  => shelf\n17        - ^shelf-bay              => shelf_bay\n18      - disk-ownership-info:\n19        - ^home-node-name         => node\n20        - ^owner-node-name        => owner_node\n21        - ^is-failed              => failed\n22      - disk-stats-info:\n23        - average-latency\n24        - disk-io-kbps\n25        - power-on-time-interval  => uptime\n26        - sectors-read\n27        - sectors-written\n28      - disk-raid-info:\n29        - ^container-type         => container_type\n30        - disk-outage-info:\n31          - ^reason               => outage\n32  \n33  plugins:\n34    - LabelAgent:\n35      # metric label zapi_value rest_value `default_value`\n36      value_to_num:\n37        - new_status outage - - `0` #ok_value is empty value, '-' would be converted to blank while processing.\n38  \n39  export_options:\n40    instance_keys:\n41      - node\n42      - disk\n43    instance_labels:\n44      - type\n45      - model\n46      - outage\n47      - owner_node\n48      - shared\n49      - shelf\n50      - shelf_bay\n51      - serial_number\n52      - failed\n53      - container_type\n
        "},{"location":"resources/templates-and-metrics/#control-what-labels-and-metrics-are-exported","title":"Control What Labels and Metrics are Exported","text":"

        Let's continue with disk and look at a few examples. We'll use curl to examine the Prometheus wire format that Harvest uses to export the metrics from conf/zapi/cdot/9.8.0/disk.yaml.

        The curl below shows all exported disk metrics. There are 24 disks on this cluster; Harvest collects seven metrics + one disk_labels + one plugin-created metric, disk_new_status, for a total of 216 rows.

        curl -s 'http://localhost:14002/metrics' | grep ^disk | cut -d'{' -f1 | sort | uniq -c\n  24 disk_bytes_per_sector           # metric\n  24 disk_labels                     # labels \n  24 disk_new_status                 # plugin created metric \n  24 disk_sectors                    # metric \n  24 disk_stats_average_latency      # metric   \n  24 disk_stats_io_kbps              # metric \n  24 disk_stats_sectors_read         # metric   \n  24 disk_stats_sectors_written      # metric  \n  24 disk_uptime                     # metric\n# sum = ((7 + 1 + 1) * 24 = 216 rows)\n
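
        As a quick cross-check, counting all disk-prefixed rows without grouping should agree with the arithmetic above. The port assumes the same poller endpoint used in the other examples on this page.

        curl -s 'http://localhost:14002/metrics' | grep -c '^disk'\n216\n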

        Here's a disk_labels for one instance, reformatted to make it easier to read.

        curl -s 'http://localhost:14002/metrics' | grep ^disk_labels | head -1\n\ndisk_labels{\n  datacenter = \"dc-1\",                 # always included - value taken from datacenter in harvest.yml\n  cluster = \"umeng-aff300-05-06\",      # always included\n  node = \"umeng-aff300-06\",            # node is in the list of export_options instance_keys\n  disk = \"1.1.13\",                     # disk is in the list of export_options instance_keys\n  type = \"SSD\",                        # remainder are included because they are listed in the template's instance_labels\n  model = \"X371_S1643960ATE\",\n  outage = \"\",\n  owner_node = \"umeng-aff300-06\",\n  shared = \"true\",\n  shelf = \"1\",\n  shelf_bay = \"13\",\n  serial_number = \"S3SENE0K500572\",\n  failed = \"false\",\n  container_type = \"\",\n} 1.0\n

        Here's the disk_sectors metric for a single instance.

        curl -s 'http://localhost:14002/metrics' | grep ^disk_sectors | head -1\n\ndisk_sectors{                          # prefix of disk_ + metric name (line 11 in template)\n  datacenter = \"dc-1\",                 # always included - value taken from datacenter in harvest.yml\n  cluster = \"umeng-aff300-05-06\",      # always included\n  node = \"umeng-aff300-06\",            # node is in the list of export_options instance_keys\n  disk = \"1.1.17\",                     # disk is in the list of export_options instance_keys\n} 1875385008                           # metric value - number of sectors for this disk instance\n
        Number of rows for each template = number of instances * (number of metrics + 1 (for <name>_labels row) + plugin additions)\nNumber of metrics                = number of counters which are not labels or keys, those without a ^ or ^^\n
        "},{"location":"resources/templates-and-metrics/#common-errors-and-troubleshooting","title":"Common Errors and Troubleshooting","text":""},{"location":"resources/templates-and-metrics/#1-failed-to-parse-any-metrics","title":"1. Failed to parse any metrics","text":"

        You add a new template to Harvest, restart your poller, and get an error message:

        WRN ./poller.go:649 > init collector-object (Zapi:NetPort): no metrics => failed to parse any\n

        This means the collector, Zapi NetPort, was unable to find any metrics. Recall that metrics are counters that are not prefixed with a ^ or ^^. If your template has no metrics, but you still want to collect labels, add the collect_only_labels: true key-value pair to your template. This flag tells Harvest to continue even though the template defines no metrics. Example.
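
        Below is a minimal, hypothetical sketch of a label-only template, assuming collect_only_labels is a top-level key alongside name, query, and object. The query and counter names are illustrative and not copied from a shipped Harvest template.

        name:                NetPort\nquery:               net-port-get-iter\nobject:              netport\n\ncollect_only_labels: true     # no metrics below; collect labels anyway\n\ncounters:\n  net-port-info:\n    - ^^node\n    - ^^port\n    - ^role\n\nexport_options:\n  instance_keys:\n    - node\n    - port\n  instance_labels:\n    - role\n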

        "},{"location":"resources/templates-and-metrics/#2-missing-data","title":"2. Missing Data","text":"
        1. What happens if an attribute is listed in instance_labels (line 43 above), but that label is missing from the counters section that begins at line 5?

        The label will still be written into disk_labels, but its value will be empty since it's missing. E.g., if line 29 were deleted, container_type would still be present in disk_labels{container_type=\"\"}.

        "},{"location":"resources/templates-and-metrics/#prometheus-wire-format","title":"Prometheus Wire Format","text":"

        https://prometheus.io/docs/instrumenting/exposition_formats/

        Keep in mind that Prometheus does not permit dashes (-) in labels. That's why Harvest templates use name replacement to convert dashed names to underscored names with =>. E.g., bytes-per-sector => bytes_per_sector converts bytes-per-sector into the Prometheus-accepted bytes_per_sector.

        Every time series is uniquely identified by its metric name and optional key-value pairs called labels.

        Labels enable Prometheus's dimensional data model: any combination of labels for the same metric name identifies a particular dimensional instantiation of that metric (for example: all HTTP requests that used the method POST to the /api/tracks handler). The query language allows filtering and aggregation based on these dimensions. Changing any label value, including adding or removing a label, will create a new time series.

        <metric_name>{<label_name>=<label_value>, ...} value [ timestamp ]

        • metric_name and label_name carry the usual Prometheus expression language restrictions
        • label_value can be any sequence of UTF-8 characters, but the backslash (\\), double-quote (\"), and line feed (\\n) characters have to be escaped as \\\\, \\\", and \\n, respectively.
        • value is a float represented as required by Go's ParseFloat() function. In addition to standard numerical values, NaN, +Inf, and -Inf are valid values representing not a number, positive infinity, and negative infinity, respectively.
        • timestamp is an int64 (milliseconds since epoch, i.e. 1970-01-01 00:00:00 UTC, excluding leap seconds), represented as required by Go's ParseInt() function
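
        Putting the format together with the disk examples from earlier on this page, a concrete exported sample looks like the line below. The value comes from the disk_sectors example above; the trailing timestamp is optional and is not shown in the disk examples on this page.

        disk_sectors{datacenter=\"dc-1\",cluster=\"umeng-aff300-05-06\",node=\"umeng-aff300-06\",disk=\"1.1.17\"} 1875385008\n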

        Exposition formats

        "},{"location":"resources/zapi-and-rest-gap/","title":"ZAPI and REST Gaps","text":""},{"location":"resources/zapi-and-rest-gap/#volume-count-difference","title":"Volume Count difference","text":"

        The REST and ZAPI collectors return a different number of volume_labels depending on whether you have set up object store servers on your cluster.

        • The REST collector does not include volume_labels for volumes associated with object store servers.
        • The ZAPI collector includes volume_labels for volumes associated with object store servers. If you have not set up any object store servers on your cluster, both collectors will return the same number of volume_labels.
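
        To see the difference on your own cluster, a quick check is to count volume_labels rows from a REST-based poller and a ZAPI-based poller pointed at the same cluster. The ports below are assumptions; substitute the Prometheus ports of your own pollers.

        # ZAPI-based poller (assumed port 14002)\ncurl -s 'http://localhost:14002/metrics' | grep -c '^volume_labels'\n\n# REST-based poller (assumed port 14003)\ncurl -s 'http://localhost:14003/metrics' | grep -c '^volume_labels'\n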
        "}]} \ No newline at end of file diff --git a/nightly/sitemap.xml.gz b/nightly/sitemap.xml.gz index d0f7209077c99bacd6803e3c2470b8c10f4cf498..412ed8ec27bd238c4d3477e04daab946909b4a8a 100644 GIT binary patch delta 13 Ucmb=gXP58h;F!z(aw2;L038$rVE_OC delta 13 Ucmb=gXP58h;9%MHd?I@V036{2bpQYW