diff --git a/clear/main.py b/clear/main.py index 5ffcd2dba4..38dca2737f 100755 --- a/clear/main.py +++ b/clear/main.py @@ -229,16 +229,38 @@ def watermark(): if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") + +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) @watermark.command('headroom') -def clear_wm_pg_headroom(): +def clear_wm_pg_headroom(namespace): """Clear user headroom WM for pg""" command = ['watermarkstat', '-c', '-t', 'pg_headroom'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @watermark.command('shared') -def clear_wm_pg_shared(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_wm_pg_shared(namespace): """Clear user shared WM for pg""" command = ['watermarkstat', '-c', '-t', 'pg_shared'] + if namespace: + command += ['-n', str(namespace)] run_command(command) @priority_group.group() @@ -261,16 +283,38 @@ def persistent_watermark(): if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") + @persistent_watermark.command('headroom') -def clear_pwm_pg_headroom(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_pg_headroom(namespace): """Clear persistent headroom WM for pg""" command = ['watermarkstat', '-c', '-p', '-t', 'pg_headroom'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @persistent_watermark.command('shared') -def clear_pwm_pg_shared(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + 
help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_pg_shared(namespace): """Clear persistent shared WM for pg""" command = ['watermarkstat', '-c', '-p', '-t', 'pg_shared'] + if namespace: + command += ['-n', str(namespace)] run_command(command) @@ -285,69 +329,159 @@ def watermark(): if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") + @watermark.command('unicast') -def clear_wm_q_uni(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_wm_q_uni(namespace): """Clear user WM for unicast queues""" command = ['watermarkstat', '-c', '-t', 'q_shared_uni'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @watermark.command('multicast') -def clear_wm_q_multi(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_wm_q_multi(namespace): """Clear user WM for multicast queues""" command = ['watermarkstat', '-c', '-t', 'q_shared_multi'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @watermark.command('all') -def clear_wm_q_all(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_wm_q_all(namespace): """Clear user WM for all queues""" command = ['watermarkstat', '-c', '-t', 'q_shared_all'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @queue.group(name='persistent-watermark') def persistent_watermark(): """Clear queue persistent WM. 
One does not simply clear WM, root is required""" if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") + @persistent_watermark.command('unicast') -def clear_pwm_q_uni(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_q_uni(namespace): """Clear persistent WM for persistent queues""" command = ['watermarkstat', '-c', '-p', '-t', 'q_shared_uni'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @persistent_watermark.command('multicast') -def clear_pwm_q_multi(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_q_multi(namespace): """Clear persistent WM for multicast queues""" command = ['watermarkstat', '-c', '-p', '-t', 'q_shared_multi'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @persistent_watermark.command('all') -def clear_pwm_q_all(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_q_all(namespace): """Clear persistent WM for all queues""" command = ['watermarkstat', '-c', '-p', '-t', 'q_shared_all'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @cli.group(name='headroom-pool') def headroom_pool(): """Clear headroom pool WM""" pass + @headroom_pool.command('watermark') -def watermark(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def watermark(namespace): """Clear headroom pool user WM. 
One does not simply clear WM, root is required""" if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") command = ['watermarkstat', '-c', '-t', 'headroom_pool'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @headroom_pool.command('persistent-watermark') -def persistent_watermark(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def persistent_watermark(namespace): """Clear headroom pool persistent WM. One does not simply clear WM, root is required""" if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") command = ['watermarkstat', '-c', '-p', '-t', 'headroom_pool'] + if namespace: + command += ['-n', str(namespace)] run_command(command) # diff --git a/config/chassis_modules.py b/config/chassis_modules.py index 4e7fd8096b..5f70ef404a 100755 --- a/config/chassis_modules.py +++ b/config/chassis_modules.py @@ -72,7 +72,7 @@ def fabric_module_set_admin_status(db, chassis_module_name, state): if state == "down": for asic in asic_list: click.echo("Stop swss@{} and peer services".format(asic)) - clicommon.run_command('sudo systemctl stop swss@{}.service'.format(asic)) + clicommon.run_command(['sudo', 'systemctl', 'stop', 'swss@{}.service'.format(asic)]) is_active = subprocess.call(["systemctl", "is-active", "--quiet", "swss@{}.service".format(asic)]) @@ -89,13 +89,13 @@ def fabric_module_set_admin_status(db, chassis_module_name, state): # without bring down the hardware for asic in asic_list: # To address systemd service restart limit by resetting the count - clicommon.run_command('sudo systemctl reset-failed swss@{}.service'.format(asic)) + clicommon.run_command(['sudo', 'systemctl', 'reset-failed', 'swss@{}.service'.format(asic)]) click.echo("Start swss@{} and peer services".format(asic)) - clicommon.run_command('sudo systemctl start 
swss@{}.service'.format(asic)) + clicommon.run_command(['sudo', 'systemctl', 'start', 'swss@{}.service'.format(asic)]) elif state == "up": for asic in asic_list: click.echo("Start swss@{} and peer services".format(asic)) - clicommon.run_command('sudo systemctl start swss@{}.service'.format(asic)) + clicommon.run_command(['sudo', 'systemctl', 'start', 'swss@{}.service'.format(asic)]) # # 'shutdown' subcommand ('config chassis_modules shutdown ...') diff --git a/config/main.py b/config/main.py index 054edcf821..401ed75680 100644 --- a/config/main.py +++ b/config/main.py @@ -17,6 +17,7 @@ import itertools import copy import tempfile +import sonic_yang from jsonpatch import JsonPatchConflict from jsonpointer import JsonPointerException @@ -42,6 +43,7 @@ from utilities_common.general import load_db_config, load_module_from_source from .validated_config_db_connector import ValidatedConfigDBConnector import utilities_common.multi_asic as multi_asic_util +from utilities_common.flock import try_lock from .utils import log @@ -58,7 +60,7 @@ from . import vlan from . import vxlan from . import plugins -from .config_mgmt import ConfigMgmtDPB, ConfigMgmt +from .config_mgmt import ConfigMgmtDPB, ConfigMgmt, YANG_DIR from . import mclag from . import syslog from . import switchport @@ -124,6 +126,12 @@ GRE_TYPE_RANGE = click.IntRange(min=0, max=65535) ADHOC_VALIDATION = True +if os.environ.get("UTILITIES_UNIT_TESTING", "0") in ("1", "2"): + temp_system_reload_lockfile = tempfile.NamedTemporaryFile() + SYSTEM_RELOAD_LOCK = temp_system_reload_lockfile.name +else: + SYSTEM_RELOAD_LOCK = "/etc/sonic/reload.lock" + # Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. 
sonic_cfggen = load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') @@ -1753,9 +1761,11 @@ def list_checkpoints(ctx, verbose): @click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services') @click.option('-f', '--force', default=False, is_flag=True, help='Force config reload without system checks') @click.option('-t', '--file_format', default='config_db',type=click.Choice(['config_yang', 'config_db']),show_default=True,help='specify the file format') +@click.option('-b', '--bypass-lock', default=False, is_flag=True, help='Do reload without acquiring lock') @click.argument('filename', required=False) @clicommon.pass_db -def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_format): +@try_lock(SYSTEM_RELOAD_LOCK, timeout=0) +def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_format, bypass_lock): """Clear current configuration and import a previous saved config DB dump file. : Names of configuration file(s) to load, separated by comma with no spaces in between """ @@ -1968,8 +1978,10 @@ def load_mgmt_config(filename): @click.option('-t', '--traffic_shift_away', default=False, is_flag=True, help='Keep device in maintenance with TSA') @click.option('-o', '--override_config', default=False, is_flag=True, help='Enable config override. Proceed with default path.') @click.option('-p', '--golden_config_path', help='Provide golden config path to override. 
Use with --override_config') +@click.option('-b', '--bypass-lock', default=False, is_flag=True, help='Do load minigraph without acquiring lock') @clicommon.pass_db -def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, golden_config_path): +@try_lock(SYSTEM_RELOAD_LOCK, timeout=0) +def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, golden_config_path, bypass_lock): """Reconfigure based on minigraph.""" argv_str = ' '.join(['config', *sys.argv[1:]]) log.log_notice(f"'load_minigraph' executing with command: {argv_str}") @@ -1983,8 +1995,22 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, fg='magenta') raise click.Abort() - # Dependency check golden config json config_to_check = read_json_file(golden_config_path) + if multi_asic.is_multi_asic(): + # Multiasic has not 100% fully validated. Thus pass here. + pass + else: + sy = sonic_yang.SonicYang(YANG_DIR) + sy.loadYangModel() + try: + sy.loadData(configdbJson=config_to_check) + sy.validate_data_tree() + except sonic_yang.SonicYangException as e: + click.secho("{} fails YANG validation! Error: {}".format(golden_config_path, str(e)), + fg='magenta') + raise click.Abort() + + # Dependency check golden config json if multi_asic.is_multi_asic(): host_config = config_to_check.get('localhost', {}) else: @@ -2311,7 +2337,7 @@ def aaa_table_hard_dependency_check(config_json): tacacs_enable = "tacacs+" in aaa_authentication_login.split(",") tacplus_passkey = TACPLUS_TABLE.get("global", {}).get("passkey", "") if tacacs_enable and len(tacplus_passkey) == 0: - click.secho("Authentication with 'tacacs+' is not allowed when passkey not exits.", fg="magenta") + click.secho("Authentication with 'tacacs+' is not allowed when passkey not exists.", fg="magenta") sys.exit(1) @@ -2365,6 +2391,20 @@ def synchronous_mode(sync_mode): config reload -y \n Option 2. 
systemctl restart swss""" % sync_mode) + +# +# 'suppress-fib-pending' command ('config suppress-fib-pending ...') +# +@config.command('suppress-fib-pending') +@click.argument('state', metavar='', required=True, type=click.Choice(['enabled', 'disabled'])) +@clicommon.pass_db +def suppress_pending_fib(db, state): + ''' Enable or disable pending FIB suppression. Once enabled, + BGP will not advertise routes that are not yet installed in the hardware ''' + + config_db = db.cfgdb + config_db.mod_entry('DEVICE_METADATA', 'localhost', {"suppress-fib-pending": state}) + # # 'yang_config_validation' command ('config yang_config_validation ...') # @@ -3116,7 +3156,7 @@ def reload(ctx, no_dynamic_buffer, no_delay, dry_run, json_data, ports, verbose) _, hwsku_path = device_info.get_paths_to_platform_and_hwsku_dirs() sonic_version_file = device_info.get_sonic_version_file() - from_db = ['-d', '--write-to-db'] + from_db = ['-d'] if dry_run: from_db = ['--additional-data'] + [str(json_data)] if json_data else [] @@ -3162,11 +3202,27 @@ def reload(ctx, no_dynamic_buffer, no_delay, dry_run, json_data, ports, verbose) ) if os.path.isfile(qos_template_file): cmd_ns = [] if ns is DEFAULT_NAMESPACE else ['-n', str(ns)] - fname = "{}{}".format(dry_run, asic_id_suffix) if dry_run else "config-db" - command = [SONIC_CFGGEN_PATH] + cmd_ns + from_db + ['-t', '{},{}'.format(buffer_template_file, fname), '-t', '{},{}'.format(qos_template_file, fname), '-y', sonic_version_file] - # Apply the configurations only when both buffer and qos - # configuration files are present + buffer_fname = "/tmp/cfg_buffer{}.json".format(asic_id_suffix) + qos_fname = "/tmp/cfg_qos{}.json".format(asic_id_suffix) + + command = [SONIC_CFGGEN_PATH] + cmd_ns + from_db + [ + '-t', '{},{}'.format(buffer_template_file, buffer_fname), + '-t', '{},{}'.format(qos_template_file, qos_fname), + '-y', sonic_version_file + ] clicommon.run_command(command, display_cmd=True) + + command = [SONIC_CFGGEN_PATH] + cmd_ns + ["-j", 
buffer_fname, "-j", qos_fname] + if dry_run: + out, rc = clicommon.run_command(command + ["--print-data"], display_cmd=True, return_cmd=True) + if rc != 0: + # clicommon.run_command does this by default when rc != 0 and return_cmd=False + sys.exit(rc) + with open("{}{}".format(dry_run, asic_id_suffix), 'w') as f: + json.dump(json.loads(out), f, sort_keys=True, indent=4) + else: + clicommon.run_command(command + ["--write-to-db"], display_cmd=True) + else: click.secho("QoS definition template not found at {}".format( qos_template_file @@ -6388,7 +6444,8 @@ def remove_reasons(counter_name, reasons, verbose): @click.option('-ydrop', metavar='', type=click.IntRange(0, 100), help="Set yellow drop probability") @click.option('-gdrop', metavar='', type=click.IntRange(0, 100), help="Set green drop probability") @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") -def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbose): +@multi_asic_util.multi_asic_click_option_namespace +def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbose, namespace): """ECN-related configuration tasks""" log.log_info("'ecn -profile {}' executing...".format(profile)) command = ['ecnconfig', '-p', str(profile)] @@ -6402,6 +6459,8 @@ def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbos if ydrop is not None: command += ['-ydrop', str(ydrop)] if gdrop is not None: command += ['-gdrop', str(gdrop)] if verbose: command += ["-vv"] + if namespace is not None: + command += ['-n', str(namespace)] clicommon.run_command(command, display_cmd=verbose) @@ -6411,13 +6470,26 @@ def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbos @config.command() @click.option('-p', metavar='', type=str, required=True, help="Profile name") @click.option('-a', metavar='', type=click.IntRange(-8,8), help="Set alpha for profile type dynamic") -@click.option('-s', metavar='', type=int, help="Set staticth 
for profile type static") -def mmu(p, a, s): +@click.option('-s', metavar='', type=click.IntRange(min=0), help="Set staticth for profile type static") +@click.option('--verbose', '-vv', is_flag=True, help="Enable verbose output") +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def mmu(p, a, s, namespace, verbose): """mmuconfig configuration tasks""" log.log_info("'mmuconfig -p {}' executing...".format(p)) command = ['mmuconfig', '-p', str(p)] if a is not None: command += ['-a', str(a)] if s is not None: command += ['-s', str(s)] + if namespace is not None: + command += ['-n', str(namespace)] + if verbose: + command += ['-vv'] clicommon.run_command(command) @@ -6439,8 +6511,9 @@ def pfc(ctx): @pfc.command() @click.argument('interface_name', metavar='', required=True) @click.argument('status', type=click.Choice(['on', 'off'])) +@multi_asic_util.multi_asic_click_option_namespace @click.pass_context -def asymmetric(ctx, interface_name, status): +def asymmetric(ctx, interface_name, status, namespace): """Set asymmetric PFC configuration.""" # Get the config_db connector config_db = ctx.obj['config_db'] @@ -6450,7 +6523,11 @@ def asymmetric(ctx, interface_name, status): if interface_name is None: ctx.fail("'interface_name' is None!") - clicommon.run_command(['pfc', 'config', 'asymmetric', str(status), str(interface_name)]) + cmd = ['pfc', 'config', 'asymmetric', str(status), str(interface_name)] + if namespace is not None: + cmd += ['-n', str(namespace)] + + clicommon.run_command(cmd) # # 'pfc priority' command ('config interface pfc priority ...') @@ -6460,8 +6537,9 @@ def asymmetric(ctx, interface_name, status): @click.argument('interface_name', metavar='', required=True) @click.argument('priority', type=click.Choice([str(x) for x in range(8)])) @click.argument('status', type=click.Choice(['on', 'off'])) 
+@multi_asic_util.multi_asic_click_option_namespace @click.pass_context -def priority(ctx, interface_name, priority, status): +def priority(ctx, interface_name, priority, status, namespace): """Set PFC priority configuration.""" # Get the config_db connector config_db = ctx.obj['config_db'] @@ -6471,7 +6549,11 @@ def priority(ctx, interface_name, priority, status): if interface_name is None: ctx.fail("'interface_name' is None!") - clicommon.run_command(['pfc', 'config', 'priority', str(status), str(interface_name), str(priority)]) + cmd = ['pfc', 'config', 'priority', str(status), str(interface_name), str(priority)] + if namespace is not None: + cmd += ['-n', str(namespace)] + + clicommon.run_command(cmd) # # 'buffer' group ('config buffer ...') @@ -7905,6 +7987,72 @@ def notice(db, category_list, max_events, namespace): handle_asic_sdk_health_suppress(db, 'notice', category_list, max_events, namespace) +# +# 'serial_console' group ('config serial_console') +# +@config.group(cls=clicommon.AbbreviationGroup, name='serial_console') +def serial_console(): + """Configuring system serial-console behavior""" + pass + + +@serial_console.command('sysrq-capabilities') +@click.argument('sysrq_capabilities', metavar='', required=True, + type=click.Choice(['enabled', 'disabled'])) +def sysrq_capabilities(sysrq_capabilities): + """Set serial console sysrq-capabilities state""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SERIAL_CONSOLE", 'POLICIES', + {'sysrq_capabilities': sysrq_capabilities}) + + +@serial_console.command('inactivity-timeout') +@click.argument('inactivity_timeout', metavar='', required=True, + type=click.IntRange(0, 35000)) +def inactivity_timeout_serial(inactivity_timeout): + """Set serial console inactivity timeout""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SERIAL_CONSOLE", 'POLICIES', + {'inactivity_timeout': inactivity_timeout}) + + +# +# 'ssh' group ('config ssh') +# 
+@config.group(cls=clicommon.AbbreviationGroup, name='ssh') +def ssh(): + """Configuring system ssh behavior""" + pass + + +@ssh.command('inactivity-timeout') +@click.argument('inactivity_timeout', metavar='', required=True, + type=click.IntRange(0, 35000)) +def inactivity_timeout_ssh(inactivity_timeout): + """Set ssh inactivity timeout""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SSH_SERVER", 'POLICIES', + {'inactivity_timeout': inactivity_timeout}) + + +@ssh.command('max-sessions') +@click.argument('max-sessions', metavar='', required=True, + type=click.IntRange(0, 100)) +def max_sessions(max_sessions): + """Set max number of concurrent logins""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SSH_SERVER", 'POLICIES', + {'max_sessions': max_sessions}) + + # # 'banner' group ('config banner ...') # diff --git a/counterpoll/main.py b/counterpoll/main.py index ad15c8c248..530281188f 100644 --- a/counterpoll/main.py +++ b/counterpoll/main.py @@ -3,17 +3,29 @@ from flow_counter_util.route import exit_if_route_flow_counter_not_support from swsscommon.swsscommon import ConfigDBConnector from tabulate import tabulate +from sonic_py_common import device_info BUFFER_POOL_WATERMARK = "BUFFER_POOL_WATERMARK" PORT_BUFFER_DROP = "PORT_BUFFER_DROP" PG_DROP = "PG_DROP" ACL = "ACL" +ENI = "ENI" DISABLE = "disable" ENABLE = "enable" DEFLT_60_SEC= "default (60000)" DEFLT_10_SEC= "default (10000)" DEFLT_1_SEC = "default (1000)" + +def is_dpu(db): + """ Check if the device is DPU """ + platform_info = device_info.get_platform_info(db) + if platform_info.get('switch_type') == 'dpu': + return True + else: + return False + + @click.group() def cli(): """ SONiC Static Counter Poll configurations """ @@ -126,6 +138,7 @@ def disable(): port_info['FLEX_COUNTER_STATUS'] = DISABLE configdb.mod_entry("FLEX_COUNTER_TABLE", PORT_BUFFER_DROP, port_info) + # Ingress PG drop packet stat @cli.group() @click.pass_context @@ 
-382,6 +395,47 @@ def disable(ctx): fc_info['FLEX_COUNTER_STATUS'] = 'disable' ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_ROUTE", fc_info) + +# ENI counter commands +@cli.group() +@click.pass_context +def eni(ctx): + """ ENI counter commands """ + ctx.obj = ConfigDBConnector() + ctx.obj.connect() + if not is_dpu(ctx.obj): + click.echo("ENI counters are not supported on non DPU platforms") + exit(1) + + +@eni.command(name='interval') +@click.argument('poll_interval', type=click.IntRange(1000, 30000)) +@click.pass_context +def eni_interval(ctx, poll_interval): + """ Set eni counter query interval """ + eni_info = {} + eni_info['POLL_INTERVAL'] = poll_interval + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ENI, eni_info) + + +@eni.command(name='enable') +@click.pass_context +def eni_enable(ctx): + """ Enable eni counter query """ + eni_info = {} + eni_info['FLEX_COUNTER_STATUS'] = 'enable' + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ENI, eni_info) + + +@eni.command(name='disable') +@click.pass_context +def eni_disable(ctx): + """ Disable eni counter query """ + eni_info = {} + eni_info['FLEX_COUNTER_STATUS'] = 'disable' + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ENI, eni_info) + + @cli.command() def show(): """ Show the counter configuration """ @@ -399,6 +453,7 @@ def show(): tunnel_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'TUNNEL') trap_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_TRAP') route_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_ROUTE') + eni_info = configdb.get_entry('FLEX_COUNTER_TABLE', ENI) header = ("Type", "Interval (in ms)", "Status") data = [] @@ -428,6 +483,10 @@ def show(): data.append(["FLOW_CNT_ROUTE_STAT", route_info.get("POLL_INTERVAL", DEFLT_10_SEC), route_info.get("FLEX_COUNTER_STATUS", DISABLE)]) + if is_dpu(config_db) and eni_info: + data.append(["ENI_STAT", eni_info.get("POLL_INTERVAL", DEFLT_10_SEC), + eni_info.get("FLEX_COUNTER_STATUS", DISABLE)]) + click.echo(tabulate(data, headers=header, 
tablefmt="simple", missingval="")) def _update_config_db_flex_counter_table(status, filename): diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index ac0e20bac0..25df7f6528 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -43,6 +43,7 @@ * [Console config commands](#console-config-commands) * [Console connect commands](#console-connect-commands) * [Console clear commands](#console-clear-commands) + * [DPU serial console utility](#dpu-serial-console-utility) * [CMIS firmware upgrade](#cmis-firmware-upgrade) * [CMIS firmware version show commands](#cmis-firmware-version-show-commands) * [CMIS firmware upgrade commands](#cmis-firmware-upgrade-commands) @@ -232,6 +233,7 @@ | Version | Modification Date | Details | | --- | --- | --- | +| v9 | Sep-19-2024 | Add DPU serial console utility | | v8 | Oct-09-2023 | Add CMIS firmware upgrade commands | | v7 | Jun-22-2023 | Add static DNS show and config commands | | v6 | May-06-2021 | Add SNMP show and config commands | @@ -2615,6 +2617,26 @@ This command displays the routing policy that takes precedence over the other ro Exit routemap ``` +**show suppress-fib-pending** + +This command is used to show the status of suppress pending FIB feature. +When enabled, BGP will not advertise routes which aren't yet offloaded. + +- Usage: + ``` + show suppress-fib-pending + ``` + +- Examples: + ``` + admin@sonic:~$ show suppress-fib-pending + Enabled + ``` + ``` + admin@sonic:~$ show suppress-fib-pending + Disabled + ``` + **show bgp device-global** This command displays BGP device global configuration. @@ -2727,6 +2749,24 @@ This command is used to remove particular IPv4 or IPv6 BGP neighbor configuratio admin@sonic:~$ sudo config bgp remove neighbor SONIC02SPINE ``` +**config suppress-fib-pending** + +This command is used to enable or disable announcements of routes not yet installed in the HW. +Once enabled, BGP will not advertise routes which aren't yet offloaded. 
+ +- Usage: + ``` + config suppress-fib-pending + ``` + +- Examples: + ``` + admin@sonic:~$ sudo config suppress-fib-pending enabled + ``` + ``` + admin@sonic:~$ sudo config suppress-fib-pending disabled + ``` + **config bgp device-global tsa/w-ecmp** This command is used to manage BGP device global configuration. @@ -2790,7 +2830,7 @@ Optionally, you can display configured console ports only by specifying the `-b` 1 9600 Enabled - - switch1 ``` -## Console config commands +### Console config commands This sub-section explains the list of configuration options available for console management module. @@ -2966,6 +3006,88 @@ Optionally, you can clear with a remote device name by specifying the `-d` or `- Go Back To [Beginning of the document](#) or [Beginning of this section](#console) +### DPU serial console utility + +**dpu-tty.py** + +This command allows user to connect to a DPU serial console via TTY device with +interactive CLI program: picocom. The configuration is from platform.json. The +utility works only on smart switch that provides DPU UART connections through +/dev/ttyS* devices. 
+ +- Usage: + ``` + dpu-tty.py (-n|--name) [(-b|-baud) ] [(-t|-tty) ] + ``` + +- Example: + ``` + root@MtFuji:/home/cisco# dpu-tty.py -n dpu0 + picocom v3.1 + + port is : /dev/ttyS4 + flowcontrol : none + baudrate is : 115200 + parity is : none + databits are : 8 + stopbits are : 1 + escape is : C-a + local echo is : no + noinit is : no + noreset is : no + hangup is : no + nolock is : no + send_cmd is : sz -vv + receive_cmd is : rz -vv -E + imap is : + omap is : + emap is : crcrlf,delbs, + logfile is : none + initstring : none + exit_after is : not set + exit is : no + + Type [C-a] [C-h] to see available commands + Terminal ready + + sonic login: admin + Password: + Linux sonic 6.1.0-11-2-arm64 #1 SMP Debian 6.1.38-4 (2023-08-08) aarch64 + You are on + ____ ___ _ _ _ ____ + / ___| / _ \| \ | (_)/ ___| + \___ \| | | | \| | | | + ___) | |_| | |\ | | |___ + |____/ \___/|_| \_|_|\____| + + -- Software for Open Networking in the Cloud -- + + Unauthorized access and/or use are prohibited. + All access and/or use are subject to monitoring. + + Help: https://sonic-net.github.io/SONiC/ + + Last login: Mon Sep 9 21:39:44 UTC 2024 on ttyS0 + admin@sonic:~$ + Terminating... + Thanks for using picocom + root@MtFuji:/home/cisco# + ``` + +Optionally, user may overwrite baud rate for experiment. + +- Example: + ``` + root@MtFuji:/home/cisco# dpu-tty.py -n dpu1 -b 9600 + ``` + +Optionally, user may overwrite TTY device for experiment. 
+ +- Example: + ``` + root@MtFuji:/home/cisco# dpu-tty.py -n dpu2 -t ttyS4 + ``` + ## CMIS firmware upgrade ### CMIS firmware version show commands @@ -3109,19 +3231,19 @@ This command is the standard CMIS diagnostic control used for troubleshooting li - Usage: ``` - sfputil debug loopback PORT_NAME LOOPBACK_MODE + sfputil debug loopback PORT_NAME LOOPBACK_MODE - Set the loopback mode + Valid values for loopback mode host-side-input: host side input loopback mode host-side-output: host side output loopback mode media-side-input: media side input loopback mode media-side-output: media side output loopback mode - none: disable loopback mode ``` - Example: ``` - admin@sonic:~$ sfputil debug loopback Ethernet88 host-side-input + admin@sonic:~$ sfputil debug loopback Ethernet88 host-side-input enable + admin@sonic:~$ sfputil debug loopback Ethernet88 media-side-output disable ``` ## DHCP Relay @@ -4771,6 +4893,7 @@ Optional argument "-p" specify a period (in seconds) with which to gather counte show interfaces counters errors show interfaces counters rates show interfaces counters rif [-p|--period ] [-i ] + show interfaces counters fec-histogram [-i ] ``` - Example: @@ -4888,6 +5011,39 @@ Optionally, you can specify a period (in seconds) with which to gather counters admin@sonic:~$ sonic-clear rifcounters ``` +The "fec-histogram" subcommand is used to display the fec histogram for the port. + +When data is transmitted, it's broken down into units called codewords. FEC algorithms add extra data to each codeword that can be used to detect and correct errors in transmission. +In a FEC histogram, "bins" represent ranges of errors or specific categories of errors. For instance, Bin0 might represent codewords with no errors, while Bin1 could represent codewords with a single bit error, and so on. The histogram shows how many codewords fell into each bin. A high number in the higher bins might indicate a problem with the transmission link, such as signal degradation. 
+ +- Example: + ``` + admin@str-s6000-acs-11:/usr/bin$ show interface counters fec-histogram -i + +Symbol Errors Per Codeword Codewords +-------------------------- --------- +BIN0: 1000000 +BIN1: 900000 +BIN2: 800000 +BIN3: 700000 +BIN4: 600000 +BIN5: 500000 +BIN6: 400000 +BIN7: 300000 +BIN8: 0 +BIN9: 0 +BIN10: 0 +BIN11: 0 +BIN12: 0 +BIN13: 0 +BIN14: 0 +BIN15: 0 + + ``` + + + + **show interfaces description** This command displays the key fields of the interfaces such as Operational Status, Administrative Status, Alias and Description. diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index a379e7282f..c1921470d4 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -22,13 +22,18 @@ "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40","Mellanox-SN4700-O32","Mellanox-SN4700-V64", "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], - "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "ACS-SN5400" ] + "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "Mellanox-SN5600-C256", "ACS-SN5400" ], + "spc5": ["ACS-SN5640"] }, "broadcom_asics": { "th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ], "th2": [ "Arista-7260CX3-D108C8", "Arista-7260CX3-C64", "Arista-7260CX3-Q64" 
], + "th3": [ "Nokia-IXR7220-H3" ], + "th4": [ "Nokia-IXR7220-H4-64D", "Nokia-IXR7220-H4-32D" ], + "th5": [ "Nokia-IXR7220-H5-64D" ], "td2": [ "Force10-S6000", "Force10-S6000-Q24S32", "Arista-7050-QX32", "Arista-7050-QX-32S", "Nexus-3164", "Arista-7050QX32S-Q32" ], - "td3": [ "Arista-7050CX3-32S-C32", "Arista-7050CX3-32S-D48C8" ] + "td3": [ "Arista-7050CX3-32S-C32", "Arista-7050CX3-32S-D48C8" ], + "j2c+": [ "Nokia-IXR7250E-36x100G", "Nokia-IXR7250E-36x400G" ] } } }, @@ -54,7 +59,11 @@ "td2": "20181100", "th": "20181100", "th2": "20181100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20201200", + "j2c+": "20220500", "cisco-8000": "20201200" } } @@ -80,7 +89,11 @@ "td2": "", "th": "20221100", "th2": "20221100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20221100", + "j2c+": "20220500", "cisco-8000": "20201200" } } @@ -104,7 +117,11 @@ "td2": "20181100", "th": "20181100", "th2": "20181100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20201200", + "j2c+": "20220500", "cisco-8000": "20201200" } }, @@ -121,7 +138,11 @@ "td2": "", "th": "20221100", "th2": "20221100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20221100", + "j2c+": "20220500", "cisco-8000": "20201200" } } @@ -147,7 +168,11 @@ "td2": "20181100", "th": "20181100", "th2": "20181100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20201200", + "j2c+": "20220500", "cisco-8000": "20201200" } } diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index 938aa1d034..452bad1ee7 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -239,7 +239,8 @@ def validate_lanes(self, config_db): for port in port_to_lanes_map: lanes = port_to_lanes_map[port] for lane in lanes: - if lane in existing: + # default lane would be 0, it does not need validate duplication. 
+ if lane in existing and lane != '0': return False, f"'{lane}' lane is used multiple times in PORT: {set([port, existing[lane]])}" existing[lane] = port return True, None diff --git a/mmuconfig b/mmuconfig deleted file mode 100755 index f9dc178625..0000000000 --- a/mmuconfig +++ /dev/null @@ -1,199 +0,0 @@ -#!/usr/bin/python3 - -""" -mmuconfig is the utility to show and change mmu configuration - -usage: mmuconfig [-h] [-v] [-l] [-p PROFILE] [-a ALPHA] [-s staticth] [-vv] - -optional arguments: - -h --help show this help message and exit - -v --version show program's version number and exit - -vv --verbose verbose output - -l --list show mmu configuration - -p --profile specify buffer profile name - -a --alpha set n for dyanmic threshold alpha 2^(n) - -s --staticth set static threshold - -""" - -import os -import sys -import argparse -import tabulate -import traceback - -BUFFER_POOL_TABLE_NAME = "BUFFER_POOL" -BUFFER_PROFILE_TABLE_NAME = "BUFFER_PROFILE" -DEFAULT_LOSSLESS_BUFFER_PARAMETER_NAME = "DEFAULT_LOSSLESS_BUFFER_PARAMETER" - -DYNAMIC_THRESHOLD = "dynamic_th" -STATIC_THRESHOLD = "static_th" -BUFFER_PROFILE_FIELDS = { - "alpha": DYNAMIC_THRESHOLD, - "staticth": STATIC_THRESHOLD -} - -# mock the redis for unit test purposes # -try: - if os.environ["UTILITIES_UNIT_TESTING"] == "2": - modules_path = os.path.join(os.path.dirname(__file__), "..") - tests_path = os.path.join(modules_path, "tests") - sys.path.insert(0, modules_path) - sys.path.insert(0, tests_path) - import mock_tables.dbconnector - -except KeyError: - pass - -from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector - -BUFFER_POOL_TABLE_NAME = "BUFFER_POOL" -BUFFER_PROFILE_TABLE_NAME = "BUFFER_PROFILE" - -''' -DYNAMIC_THRESHOLD = "dynamic_th" -BUFFER_PROFILE_FIELDS = { - "alpha": DYNAMIC_THRESHOLD -} -''' - -class MmuConfig(object): - def __init__(self, verbose, config): - self.verbose = verbose - self.config = config - - # Set up db connections - if self.config: - self.db = 
ConfigDBConnector() - self.db.connect() - else: - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.STATE_DB, False) - - def get_table(self, tablename): - if self.config: - return self.db.get_table(tablename) - - entries = {} - keys = self.db.keys(self.db.STATE_DB, tablename + '*') - - if not keys: - return None - - for key in keys: - entries[key.split('|')[1]] = self.db.get_all(self.db.STATE_DB, key) - - return entries - - def list(self): - lossless_traffic_pattern = self.get_table(DEFAULT_LOSSLESS_BUFFER_PARAMETER_NAME) - if lossless_traffic_pattern: - for _, pattern in lossless_traffic_pattern.items(): - config = [] - - print("Lossless traffic pattern:") - for field, value in pattern.items(): - config.append([field, value]) - print(tabulate.tabulate(config) + "\n") - - buf_pools = self.get_table(BUFFER_POOL_TABLE_NAME) - if buf_pools: - for pool_name, pool_data in buf_pools.items(): - config = [] - - print("Pool: " + pool_name) - for field, value in pool_data.items(): - config.append([field, value]) - print(tabulate.tabulate(config) + "\n") - if self.verbose: - print("Total pools: %d\n\n" % len(buf_pools)) - else: - print("No buffer pool information available") - - buf_profs = self.get_table(BUFFER_PROFILE_TABLE_NAME) - if buf_profs: - for prof_name, prof_data in buf_profs.items(): - config = [] - - print("Profile: " + prof_name) - for field, value in prof_data.items(): - config.append([field, value]) - print(tabulate.tabulate(config) + "\n") - if self.verbose: - print("Total profiles: %d" % len(buf_profs)) - else: - print("No buffer profile information available") - - def set(self, profile, field_alias, value): - if os.geteuid() != 0: - sys.exit("Root privileges required for this operation") - - field = BUFFER_PROFILE_FIELDS[field_alias] - buf_profs = self.db.get_table(BUFFER_PROFILE_TABLE_NAME) - v = int(value) - if field == DYNAMIC_THRESHOLD: - if v < -8 or v > 8: - sys.exit("Invalid alpha value: 2^(%s)" % (value)) - - if 
profile in buf_profs and DYNAMIC_THRESHOLD not in buf_profs[profile]: - sys.exit("%s not using dynamic thresholding" % (profile)) - elif field == STATIC_THRESHOLD: - if v < 0: - sys.exit("Invalid static threshold value: (%s)" % (value)) - - buf_profs = self.db.get_table(BUFFER_PROFILE_TABLE_NAME) - if profile in buf_profs and STATIC_THRESHOLD not in buf_profs[profile]: - sys.exit("%s not using static threshold" % (profile)) - else: - sys.exit("Set field %s not supported" % (field)) - - if self.verbose: - print("Setting %s %s value to %s" % (profile, field, value)) - self.db.mod_entry(BUFFER_PROFILE_TABLE_NAME, profile, {field: value}) - - -def main(config): - if config: - parser = argparse.ArgumentParser(description='Show and change: mmu configuration', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show mmu configuration') - parser.add_argument('-p', '--profile', type=str, help='specify buffer profile name', default=None) - parser.add_argument('-a', '--alpha', type=str, help='set n for dyanmic threshold alpha 2^(n)', default=None) - parser.add_argument('-s', '--staticth', type=str, help='set n for static threshold', default=None) - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - else: - parser = argparse.ArgumentParser(description='Show buffer state', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show buffer state') - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - - parser.add_argument('-vv', '--verbose', action='store_true', help='verbose output', default=False) - - args = parser.parse_args() - - try: - mmu_cfg = MmuConfig(args.verbose, config) - if args.list: - mmu_cfg.list() - elif config and args.profile: - import pdb; pdb.set_trace() - if args.alpha: - mmu_cfg.set(args.profile, "alpha", args.alpha) - elif args.staticth: - mmu_cfg.set(args.profile, 
"staticth", args.staticth) - else: - parser.print_help() - sys.exit(1) - - except Exception as e: - print("Exception caught: ", str(e), file=sys.stderr) - traceback.print_exc() - sys.exit(1) - -if __name__ == "__main__": - if sys.argv[0].split('/')[-1] == "mmuconfig": - main(True) - else: - main(False) diff --git a/pfc/main.py b/pfc/main.py index f0b376e242..071b4a304e 100644 --- a/pfc/main.py +++ b/pfc/main.py @@ -1,39 +1,63 @@ #!/usr/bin/env python3 +import os import click -from swsscommon.swsscommon import ConfigDBConnector +import json +from sonic_py_common import multi_asic from tabulate import tabulate from natsort import natsorted +from utilities_common import multi_asic as multi_asic_util +# Constants ALL_PRIORITIES = [str(x) for x in range(8)] PRIORITY_STATUS = ['on', 'off'] +PORT_TABLE_NAME = "PORT" +PORT_QOS_MAP_TABLE_NAME = "PORT_QOS_MAP" class Pfc(object): - def __init__(self, cfgdb=None): - self.cfgdb = cfgdb + def __init__(self, namespace=None): + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.config_db = None + # For unit testing + self.updated_port_tables = {} + self.test_filename = '/tmp/pfc_testdata.json' + + def dump_config_to_json(self, table_name, namespace): + """ + This function dumps the current config in a JSON file for unit testing. + """ + # Only dump files in unit testing mode + if os.environ["UTILITIES_UNIT_TESTING"] != "2": + return + + if namespace not in self.updated_port_tables.keys(): + self.updated_port_tables[namespace] = {} + + self.updated_port_tables[namespace][table_name] = self.config_db.get_table(table_name) + with open(self.test_filename, "w") as fd: + json.dump(self.updated_port_tables, fd) + + @multi_asic_util.run_on_multi_asic def configPfcAsym(self, interface, pfc_asym): """ PFC handler to configure asymmetric PFC. 
""" - configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb - configdb.connect() - - configdb.mod_entry("PORT", interface, {'pfc_asym': pfc_asym}) + self.config_db.mod_entry(PORT_TABLE_NAME, interface, {'pfc_asym': pfc_asym}) + self.dump_config_to_json(PORT_TABLE_NAME, self.multi_asic.current_namespace) + @multi_asic_util.run_on_multi_asic def showPfcAsym(self, interface): """ PFC handler to display asymmetric PFC information. """ + namespace_str = f"Namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' header = ('Interface', 'Asymmetric') - configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb - configdb.connect() - if interface: - db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|{0}'.format(interface)) + db_keys = self.config_db.keys(self.config_db.CONFIG_DB, 'PORT|{0}'.format(interface)) else: - db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|*') + db_keys = self.config_db.keys(self.config_db.CONFIG_DB, 'PORT|*') table = [] @@ -43,36 +67,35 @@ def showPfcAsym(self, interface): key = i.split('|')[-1] if key and key.startswith('Ethernet'): - entry = configdb.get_entry('PORT', key) + entry = self.config_db.get_entry(PORT_TABLE_NAME, key) table.append([key, entry.get('pfc_asym', 'N/A')]) sorted_table = natsorted(table) - click.echo() + click.echo(namespace_str) click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) click.echo() + @multi_asic_util.run_on_multi_asic def configPfcPrio(self, status, interface, priority): - configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb - configdb.connect() - - if interface not in configdb.get_keys('PORT_QOS_MAP'): + if interface not in self.config_db.get_keys(PORT_QOS_MAP_TABLE_NAME): click.echo('Cannot find interface {0}'.format(interface)) return """Current lossless priorities on the interface""" - entry = configdb.get_entry('PORT_QOS_MAP', interface) + entry = self.config_db.get_entry('PORT_QOS_MAP', interface) 
enable_prio = entry.get('pfc_enable').split(',') """Avoid '' in enable_prio""" enable_prio = [x.strip() for x in enable_prio if x.strip()] + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' if status == 'on' and priority in enable_prio: - click.echo('Priority {0} has already been enabled on {1}'.format(priority, interface)) + click.echo('Priority {0} has already been enabled on {1}{2}'.format(priority, interface, namespace_str)) return if status == 'off' and priority not in enable_prio: - click.echo('Priority {0} is not enabled on {1}'.format(priority, interface)) + click.echo('Priority {0} is not enabled on {1}{2}'.format(priority, interface, namespace_str)) return if status == 'on': @@ -82,11 +105,10 @@ def configPfcPrio(self, status, interface, priority): enable_prio.remove(priority) enable_prio.sort() - configdb.mod_entry("PORT_QOS_MAP", interface, {'pfc_enable': ','.join(enable_prio)}) + self.config_db.mod_entry(PORT_QOS_MAP_TABLE_NAME, interface, {'pfc_enable': ','.join(enable_prio)}) + self.dump_config_to_json(PORT_QOS_MAP_TABLE_NAME, self.multi_asic.current_namespace) - """Show the latest PFC configuration""" - self.showPfcPrio(interface) - + @multi_asic_util.run_on_multi_asic def showPfcPrio(self, interface): """ PFC handler to display PFC enabled priority information. 
@@ -94,80 +116,82 @@ def showPfcPrio(self, interface): header = ('Interface', 'Lossless priorities') table = [] - configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb - configdb.connect() - """Get all the interfaces with QoS map information""" - intfs = configdb.get_keys('PORT_QOS_MAP') + intfs = self.config_db.get_keys('PORT_QOS_MAP') """The user specifies an interface but we cannot find it""" + namespace_str = f"Namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' if interface and interface not in intfs: - click.echo('Cannot find interface {0}'.format(interface)) + if multi_asic.is_multi_asic(): + click.echo('Cannot find interface {0} for {1}'.format(interface, namespace_str)) + else: + click.echo('Cannot find interface {0}'.format(interface)) return if interface: intfs = [interface] for intf in intfs: - entry = configdb.get_entry('PORT_QOS_MAP', intf) + entry = self.config_db.get_entry('PORT_QOS_MAP', intf) table.append([intf, entry.get('pfc_enable', 'N/A')]) sorted_table = natsorted(table) - click.echo() + click.echo(namespace_str) click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) click.echo() - + + @click.group() -@click.pass_context -def cli(ctx): +def cli(): """PFC Command Line""" - # Use the cfgdb object if given as input. 
- cfgdb = None if ctx.obj is None else ctx.obj.cfgdb - ctx.obj = {'pfc': Pfc(cfgdb)} @cli.group() -@click.pass_context -def config(ctx): +def config(): """Config PFC""" pass + @cli.group() -@click.pass_context -def show(ctx): +def show(): """Show PFC information""" pass + @click.command() @click.argument('status', type=click.Choice(PRIORITY_STATUS)) @click.argument('interface', type=click.STRING) -@click.pass_context -def configAsym(ctx, status, interface): +@multi_asic_util.multi_asic_click_option_namespace +def configAsym(status, interface, namespace): """Configure asymmetric PFC on a given port.""" - ctx.obj['pfc'].configPfcAsym(interface, status) + Pfc(namespace).configPfcAsym(interface, status) + @click.command() @click.argument('status', type=click.Choice(PRIORITY_STATUS)) @click.argument('interface', type=click.STRING) @click.argument('priority', type=click.Choice(ALL_PRIORITIES)) -@click.pass_context -def configPrio(ctx, status, interface, priority): +@multi_asic_util.multi_asic_click_option_namespace +def configPrio(status, interface, priority, namespace): """Configure PFC on a given priority.""" - ctx.obj['pfc'].configPfcPrio(status, interface, priority) + Pfc(namespace).configPfcPrio(status, interface, priority) + @click.command() @click.argument('interface', type=click.STRING, required=False) -@click.pass_context -def showAsym(ctx, interface): +@multi_asic_util.multi_asic_click_option_namespace +def showAsym(interface, namespace): """Show asymmetric PFC information""" - ctx.obj['pfc'].showPfcAsym(interface) + Pfc(namespace).showPfcAsym(interface) + @click.command() @click.argument('interface', type=click.STRING, required=False) -@click.pass_context -def showPrio(ctx, interface): +@multi_asic_util.multi_asic_click_option_namespace +def showPrio(interface, namespace): """Show PFC priority information""" - ctx.obj['pfc'].showPfcPrio(interface) + Pfc(namespace).showPfcPrio(interface) + config.add_command(configAsym, "asymmetric") 
config.add_command(configPrio, "priority") diff --git a/rcli/utils.py b/rcli/utils.py index e2f48788ba..7563eafdcd 100644 --- a/rcli/utils.py +++ b/rcli/utils.py @@ -1,7 +1,7 @@ import click -from getpass import getpass +import getpass import os -import sys +import signal from swsscommon.swsscommon import SonicV2Connector @@ -19,6 +19,8 @@ CHASSIS_MODULE_HOSTNAME_TABLE = 'CHASSIS_MODULE_HOSTNAME_TABLE' CHASSIS_MODULE_HOSTNAME = 'module_hostname' +GET_PASSWORD_TIMEOUT = 10 + def connect_to_chassis_state_db(): chassis_state_db = SonicV2Connector(host="127.0.0.1") chassis_state_db.connect(chassis_state_db.CHASSIS_STATE_DB) @@ -151,8 +153,17 @@ def get_password(username=None): if username is None: username = os.getlogin() - return getpass( + def get_password_timeout(*args): + print("\nAborted! Timeout when waiting for password input.") + exit(1) + + signal.signal(signal.SIGALRM, get_password_timeout) + signal.alarm(GET_PASSWORD_TIMEOUT) # Set a timeout of GET_PASSWORD_TIMEOUT (10) seconds + password = getpass.getpass( "Password for username '{}': ".format(username), # Pass in click stdout stream - this is similar to using click.echo stream=click.get_text_stream('stdout') ) + signal.alarm(0) # Cancel the alarm + + return password diff --git a/scripts/debug_voq_chassis_packet_drops.sh b/scripts/debug_voq_chassis_packet_drops.sh new file mode 100755 index 0000000000..53e21c6f09 --- /dev/null +++ b/scripts/debug_voq_chassis_packet_drops.sh @@ -0,0 +1,371 @@ +#!/usr/bin/bash +# defaults for env vars +sleep_period=${sleep_period:-0} +maxiter=${maxiter:-25} # all but 4 iterations will be polling Egress drops +log=${log:-/dev/stdout} +time_format="%D %T.%6N" +delim="END" +# options +ing_check_mc=${ing_check_mc:-1} +ing_check_macsec=${ing_check_macsec:-1} +egr_check_mc=${egr_check_mc:-1} +egr_check_pmf_hit_bits=${egr_check_pmf_hit_bits:-1} +egr_diag_counter_g=${egr_diag_counter_g:-1} + +declare -a cores=("0" "1") +declare -a asics=("0" "1") +queue_pair_mask_a=(0 0 0 0) +dsp_map_a=(0 0 0 0) +
+timestamp(){ + curr_time=$(date +"$time_format") + echo "$curr_time $logmsg" >> $log +} + +print_pqp_reasons() { + disc_reasons=$((16#${disc_reasons})) + if [ $disc_reasons -eq 0 ]; then echo "none" >> $log ; fi + if [ $(($disc_reasons & 1)) -ne 0 ] ; then echo "0- Total PDs threshold violated" >> $log ; fi + if [ $(($disc_reasons & 2)) -ne 0 ] ; then echo "1- Total PDs UC pool size threshold violated" >> $log ; fi + if [ $(($disc_reasons & 4)) -ne 0 ] ; then echo "2- Per port UC PDs threshold" >> $log ; fi + if [ $(($disc_reasons & 8)) -ne 0 ] ; then echo "3- Per queue UC PDs thresholds">> $log ; fi + if [ $(($disc_reasons & 16)) -ne 0 ] ; then echo "4- Per port UC DBs threshold">> $log ; fi + if [ $(($disc_reasons & 32)) -ne 0 ] ; then echo "5- Per queue UC DBs threshold">> $log ; fi + if [ $(($disc_reasons & 64)) -ne 0 ] ; then echo "6- Per queue disable bit">> $log ; fi + if [ $(($disc_reasons & 128)) -ne 0 ] ; then echo "7- Undefined">> $log ; fi + if [ $(($disc_reasons & 256)) -ne 0 ] ; then echo "8- Total PDs MC pool size threshold">> $log ; fi + if [ $(($disc_reasons & 512)) -ne 0 ] ; then echo "9- Per interface PDs threhold">> $log; fi + if [ $(($disc_reasons & 1024)) -ne 0 ] ; then echo "10- MC SP threshold">> $log ; fi + if [ $(($disc_reasons & 2048)) -ne 0 ] ; then echo "11- per MC-TC threshold">> $log ; fi + if [ $(($disc_reasons & 4096)) -ne 0 ] ; then echo "12- MC PDs per port threshold">> $log ; fi + if [ $(($disc_reasons & 8192)) -ne 0 ] ; then echo "13- MC PDs per queue threshold">> $log ; fi + if [ $(($disc_reasons & 16384)) -ne 0 ] ; then echo "14- MC per port size (bytes) threshold">> $log ; fi + if [ $(($disc_reasons & 32768)) -ne 0 ] ; then echo "15- MC per queue size(bytes) thresholds">> $log ; fi +} +print_rqp_reasons(){ + disc_reasons=$((16#${disc_reasons})) + if [ $disc_reasons -eq 0 ]; then echo "none" >> $log ; fi + if [ $(($disc_reasons & 1)) -ne 0 ] ; then echo "0- Total DBs threshold violated" >> $log ; fi + if [ $(($disc_reasons & 
2)) -ne 0 ] ; then echo "1- Total UC DBs pool size threshold violated" >> $log ; fi + if [ $(($disc_reasons & 4)) -ne 0 ] ; then echo "2- UC packet discarded in EMR because UC FIFO is full" >> $log ; fi + if [ $(($disc_reasons & 8)) -ne 0 ] ; then echo "3- MC HP packetd discarded in EMR because MC FIFO is full">> $log ; fi + if [ $(($disc_reasons & 16)) -ne 0 ] ; then echo "4- MC LP packetd discarded in EMR because MC FIFO is full">> $log ; fi + if [ $(($disc_reasons & 32)) -ne 0 ] ; then echo "5- Total MC DBs pool size threshold violated">> $log ; fi + if [ $(($disc_reasons & 64)) -ne 0 ] ; then echo "6- Packet-DP is not eligible to take from shared DBs resources">> $log ; fi + if [ $(($disc_reasons & 128)) -ne 0 ] ; then echo "7- USP DBs threshold violated">> $log ; fi + if [ $(($disc_reasons & 256)) -ne 0 ] ; then echo "8- Discrete-Partitioning method: MC-TC DBs threshold violated">> $log ; fi + if [ $(($disc_reasons & 512)) -ne 0 ] ; then echo "9- Strict-priority method: MC-TC mapped to SP0 DBs threshold violated">> $log; fi + if [ $(($disc_reasons & 1024)) -ne 0 ] ; then echo "10- Strict-Priority method: MC-TC mapped to SP1 DBs threshold violated">> $log ; fi +} + +# whenever port_disabled mask change, print the up ports +# (according to the queue-pair mask and DSP port mapping, which is what matters ) + +check_new_port_state() { + last_queue_pair_mask=${queue_pair_mask_a[$index]} + queue_pair_mask=$(bcmcmd -n $asic "g hex ECGM_CGM_QUEUE_PAIR_DISABLED.ECGM${core}" | head -n +2 | tail -1) + if [ "$queue_pair_mask" == "$last_queue_pair_mask" ] ; then + return + fi + queue_pair_mask_a[$index]=$queue_pair_mask + logmsg="EGRESS_QPAIR asic $asic core $core new disabled mask: $queue_pair_mask" + timestamp + + start_dsp=$core + let amt=255-$core + dsp_map_a[$index]=$(bcmcmd -n $asic "d SCH_DSP_2_PORT_MAP_DSPP.SCH${core} $start_dsp $amt") + + hr_num=0 + for pos in {-3..-129..-2}; do # todo + byte=${queue_pair_mask:pos:2} + if [ $hr_num -le 8 ] ; then + 
hr_num_hex="HR_NUM=${hr_num}" + else + hr_num_hex=$(printf "HR_NUM=0x%x" $hr_num) + fi + hr_num=$(( hr_num + 8)) + entry=$(echo ${dsp_map_a[$index]} | sed -e "s/\r/\r\n/g" | grep -m 1 "$hr_num_hex") + found=$? + if [ $found -eq 1 ] ; then + continue + fi + dsp_port=$(echo $entry |grep -o "\[.*\]"| tr -dc '[:alnum:]') + if [ "$byte" = "ff" ]; then + printf "DOWN %3d ${entry}\n" $dsp_port >> $log + else + printf "UP %3d ${entry}\n" $dsp_port >> $log + fi + done + echo >> $log +} + +decode_last_rqp_drop() { + rqp_disc=$(bcmcmd -n $asic "g hex ECGM_RQP_DISCARD_REASONS.ECGM${core}" | head -n -1 | tail -1) + prefix=${rqp_disc: 0: 2} + if [ "$prefix" != "0x" ]; then + return; # empty (0) or a failed read + fi + logmsg="EGRESS_DROP RQP_DISCARD_REASONS asic $asic core $core index $index: $rqp_disc" + timestamp + disc_reasons=${rqp_disc: -4: 3} + print_rqp_reasons +} + +decode_last_pqp_drop() { + pqp_disc=$(bcmcmd -n $asic "g hex ECGM_PQP_DISCARD_REASONS.ECGM${core}" | head -n -1 | tail -1 ) + prefix=${pqp_disc: 0: 2} + if [ "$prefix" != "0x" ]; then + return; # empty (0) or a failed read + fi + logmsg="EGRESS_DROP PQP_DISCARD_REASONS asic $asic core $core: $pqp_disc" + timestamp + check_new_port_state # in case the DSP map has changed + disc_reasons=${pqp_disc: -5: 4} + last_reason=${pqp_disc: -9: 4} + drop_cmd=${pqp_disc: -19: 10} + queue=${drop_cmd: -8: 3} + queue=$((16#${queue})) + queue=$(($queue / 4 )) + queue=$(($queue & 248)) + hr_num_hex=$(printf "%02x" $queue) + entry=$(echo ${dsp_map_a[$index]} | sed -e "s/\r/\r\n/g" | grep -m 1 "$hr_num_hex") + found=$? 
+ dsp_port=$(echo $entry |grep -o "\[.*\]"| tr -dc '[:alnum:]') + if [ $found -eq 1 ] ; then + echo "drop_reason 0x${disc_reasons} queue 0x${hr_num_hex} dsp_port not_found" >> $log + else + dsp_port=$(echo $entry |grep -o "\[.*\]"| tr -dc '[:alnum:]') + echo "drop_reason 0x${disc_reasons} queue 0x${hr_num_hex} dsp port $dsp_port" >> $log + fi + echo "pqp discard reasons (cumulative since last read):" >> $log + print_pqp_reasons + echo "pqp last packet discard reasons:" >> $log + disc_reasons=$last_reason + print_pqp_reasons + echo >> $log +} + + +clear_tcam_hit_bits() { + cint_filename="/tmp/hitbits" + cint=';print bcm_field_entry_hit_flush(0, BCM_FIELD_ENTRY_HIT_FLUSH_ALL, 0); exit;' + bcmcmd -n $asic "log off; rm $cint_filename;log file=$cint_filename quiet=yes; echo '$cint';log off;cint $cint_filename" >> /dev/null +} + +dump_tcam_drop_action_hits() { + echo "SAI_FG_TRAP hits:" >> $log + bcmcmd -n $asic "dbal table dump Table=SAI_FG_TRAP" | grep "CORE" | awk -F'|' '{print $2,$34}' >> $log + echo "EPMF_Cascade hits:" >> $log + # entries 51,52,53,54,55,56 have drop action + bcmcmd -n $asic "dbal table dump Table=EPMF_Cascade" | grep "CORE" | awk -F'|' '{print $2,$10}'>> $log + clear_tcam_hit_bits +} + +check_egress_drops() { + hit=0 + pqp_uc_discard=$(bcmcmd -n $asic "g hex PQP_PQP_DISCARD_UNICAST_PACKET_COUNTER.PQP${core}"| head -n -1 | tail -n +2 | tr -dc '[:alnum:]') + erpp_discard=$(bcmcmd -n $asic "g hex PQP_ERPP_DISCARDED_PACKET_COUNTER.PQP${core}"| head -n -1 | tail -n +2 | tr -dc '[:alnum:]') + rqp_debug_counters=$(bcmcmd -n $asic "g RQP_PRP_DEBUG_COUNTERS.RQP${core}" | head -n -1 | tail -n +2 | sed -e 's/=/ /g'| sed -e 's/,/ /g'|tr -dc "[:alnum:] =_" ) + + pqp_uc_discard=$(printf "%d" $pqp_uc_discard) + erpp_discard=$(printf "%d" $erpp_discard) + + if [ $pqp_uc_discard -ne 0 ]; then + logmsg="EGRESS_DROP UC_DROP on ASIC $asic CORE $core : PQP_DISCARD_UNICAST_PACKET_COUNTER = $pqp_uc_discard" + timestamp + hit=1; + fi + if [ $erpp_discard -ne 0 ]; then + 
logmsg="EGRESS_DROP ERPP_DROP on ASIC $asic CORE $core : PQP_ERPP_DISCARDED_PACKET_COUNTER = $erpp_discard" + timestamp + hit=1; + fi + + sop_discard_uc=$(echo $rqp_debug_counters | awk {'print $4'}) + prp_discard_uc=$(echo $rqp_debug_counters | awk {'print $14'}) + dbf_err_cnt=$(echo $rqp_debug_counters | awk {'print $18'}) + + sop_discard_uc=$(printf "%d" $sop_discard_uc) + prp_discard_uc=$(printf "%d" $prp_discard_uc) + dbf_err_cnt=$(printf "%d" $dbf_err_cnt) + + if [ $sop_discard_uc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_SOP_UC_DISCARD on ASIC $asic CORE $core : $sop_discard_uc" + timestamp + hit=1; + fi + if [ $prp_discard_uc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_PRP_UC_DISCARD on ASIC $asic CORE $core : $prp_discard_uc" + timestamp + hit=1; + fi + if [ $dbf_err_cnt -ne 0 ]; then + logmsg="EGRESS_DROP RQP_DBF_ERR on ASIC $asic CORE $core : $dbf_err_cnt" + timestamp + hit=1; + fi + if [ $egr_check_mc -ne 0 ]; then + sop_discard_mc=$(echo $rqp_debug_counters | awk {'print $6'}) + prp_discard_mc=$(echo $rqp_debug_counters | awk {'print $16'}) + sop_discard_mc=$(printf "%d" $sop_discard_mc) + prp_discard_mc=$(printf "%d" $prp_discard_mc) + + pqp_mc_discard=$(bcmcmd -n $asic "g hex PQP_PQP_DISCARD_MULTICAST_PACKET_COUNTER.PQP${core}" | head -n -1 | tail -n +2 | tr -dc '[:alnum:]') + pqp_mc_discard=$(printf "%d" $pqp_mc_discard) + if [ $pqp_mc_discard -ne 0 ]; then + logmsg="EGRESS_DROP MC_DROP ASIC $asic CORE $core : PQP_DISCARD_MULTICAST_PACKET_COUNTER = $pqp_mc_discard" + timestamp + hit=1; + fi + if [ $sop_discard_mc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_SOP_MC_DISCARD on ASIC $asic CORE $core : $sop_discard_mc" + timestamp + hit=1; + fi + if [ $prp_discard_mc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_PRP_MC_DISCARD on ASIC $asic CORE $core : $prp_discard_mc" + timestamp + hit=1; + fi + fi + if [ $hit -eq 0 ] ; then + return + fi + + decode_last_pqp_drop + # bcmcmd -n $asic "g chg ECGM_RQP_DISCARD_REASONS.ECGM${core}" | grep "=" >> $log + decode_last_rqp_drop 
+ bcmcmd -n $asic "g chg PQP_INTERRUPT_REGISTER.PQP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g chg RQP_INTERRUPT_REGISTER.RQP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s PQP_INTERRUPT_REGISTER.PQP${core} -1" > /dev/null + bcmcmd -n $asic "s RQP_INTERRUPT_REGISTER.RQP${core} -1" > /dev/null + + bcmcmd -n $asic "g chg RQP_PACKET_REASSEMBLY_INTERRUPT_REGISTER.RQP${core}"| tail -2 | head -n -1 >> $log + bcmcmd -n $asic "s RQP_PACKET_REASSEMBLY_INTERRUPT_REGISTER.RQP${core} -1" > /dev/null + + bcmcmd -n $asic "g chg FDR_INTERRUPT_REGISTER.FDR${core}"| head -n -1 | tail -n +2 >> $log + # FDA0 block is shared by both cores + bcmcmd -n $asic "g chg FDA_INTERRUPT_REGISTER.FDA0"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s FDR_INTERRUPT_REGISTER.FDR${core} -1" > /dev/null + bcmcmd -n $asic "s FDA_INTERRUPT_REGISTER.FDA0 -1" > /dev/null + + bcmcmd -n $asic "g chg ERPP_INTERRUPT_REGISTER.ERPP${core}"| head -n -1 | tail -n +2>> $log + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARD_INTERRUPT_REGISTER.ERPP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARD_INTERRUPT_REGISTER_2.ERPP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s ERPP_ERPP_DISCARD_INTERRUPT_REGISTER.ERPP${core} -1" > /dev/null + bcmcmd -n $asic "s ERPP_ERPP_DISCARD_INTERRUPT_REGISTER_2.ERPP${core} -1" > /dev/null + bcmcmd -n $asic "s ERPP_INTERRUPT_REGISTER.ERPP${core} -1" > /dev/null + + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARDS_INTERRUPT_REGISTER_MASK.ERPP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARDS_INTERRUPT_REGISTER_MASK_2.ERPP${core}"| head -n -1 | tail -n +2 >> $log + + #bcmcmd -n $asic "g chg IPT_FLOW_CONTROL_DEBUG.IPT${core}" >> $log + bcmcmd -n $asic "tm egr con"| head -n -1 | tail -n +2 >> $log + + if [ $egr_check_pmf_hit_bits -eq 1 ]; then + dump_tcam_drop_action_hits + fi + if [ $egr_diag_counter_g -eq 1 ]; then + bcmcmd -n $asic "diag counter g nz core=${core}"| head -n -1 | 
tail -n +2 >> $log + fi + echo "$delim" >> $log + echo >> $log +} + +dump_ingress_traps() { + bcmcmd -n $asic "g IPPB_DBG_FLP_DATA_PATH_TRAP.IPPB${core}" | head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s IPPB_DBG_FLP_DATA_PATH_TRAP.IPPB${core} -1"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g IPPE_DBG_LLR_TRAP_0.IPPE${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s IPPE_DBG_LLR_TRAP_0.IPPE${core} -1"| head -n -1 | tail -n +2 >> $log +} +dump_macsec() { + bcmcmd -n $asic "sec stat show; sec stat clear" >> $log +} + +rjct_filename=rjct_status.txt + +check_ingress_drops() { + hit=0 + bcmcmd -n $asic "getreg chg CGM_REJECT_STATUS_BITMAP.CGM${core}" | awk '{split($0,a,":"); print a[2]}' > $rjct_filename + while read -r line; do + [ -z $line ] && continue + res=$(echo $line | grep -v "," | grep "<>") + if [ -z $res ]; then + hit=1 + fi + done < "$rjct_filename" + + if [ $hit == 1 ]; then + logmsg="INGRESS_DROP asic $asic core $core" + timestamp + cat $rjct_filename >> $log + bcmcmd -n $asic "g CGM_MAX_VOQ_WORDS_QSIZE_TRACK.CGM${core}" | head -n -1 | tail -n +2 >> $log + #bcmcmd -n $asic "g chg IPT_FLOW_CONTROL_DEBUG.IPT${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "tm ing cong core=$core" >> $log + bcmcmd -n $asic "trap last info core=$core" >> $log + bcmcmd -n $asic "pp vis ppi core=$core" >> $log + bcmcmd -n $asic "pp vis fdt core=$core" >> $log + bcmcmd -n $asic "pp vis ikleap core=$core" >> $log + #bcmcmd -n $asic "pp vis last" >> $log + if [ $ing_check_mc -eq 1 ] ; then + bcmcmd -n $asic "dbal table dump table=mcdb" >> $log + bcmcmd -n $asic "g MTM_ING_MCDB_OFFSET" | head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g MTM_EGR_MCDB_OFFSET" | head -n -1 | tail -n +2 >> $log + fi + bcmcmd -n $asic "diag counter g nz core=${core}" >> $log + echo "" >> $log + dump_ingress_traps + echo "" >> $log + if [ $ing_check_macsec -eq 1 ] ; then + dump_macsec + fi + echo "$delim" >> $log + fi +} + +# clear stats +for asic in "${asics[@]}" 
+do + bcmcmd -n $asic "sec stat clear; clear counter; clear interrupt all" >> /dev/null +done + +iter_a=(0 0 0 0) +while true; +do + for asic in "${asics[@]}" + do + for core in "${cores[@]}" + do + index=$(($asic*2+$core)) + iter=$((${iter_a[$index]}+1)) + if [ $iter -eq $maxiter ] ; then + iter_a[$index]=0; + sleep $sleep_period + continue + fi + iter_a[$index]=$iter + # for majority of polling cycles, check the PQP drop reason and queue + if [ $iter -gt 4 ] ; then + decode_last_pqp_drop + continue + fi + # check for any change in pqp disabled port mask + if [ $iter -eq 1 ] ; then + check_new_port_state + continue + fi + if [ $iter -eq 2 ] ; then + check_egress_drops + continue + fi + if [ $iter -eq 3 ]; then + check_ingress_drops + continue + fi + if [ $iter -eq 4 ]; then + decode_last_rqp_drop + fi + done + done +done + diff --git a/scripts/dpu-tty.py b/scripts/dpu-tty.py new file mode 100755 index 0000000000..ff0b041b01 --- /dev/null +++ b/scripts/dpu-tty.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 +# +# Copyright (c) 2024 Cisco Systems, Inc. +# + +import argparse +import json +import os +import subprocess +from sonic_py_common import device_info + +UART_CON = '/usr/bin/picocom' + + +def get_dpu_tty(dpu, tty, baud): + + platform = device_info.get_platform() + if not platform: + print("No platform") + return None + + # Get platform path. 
+ platform_path = device_info.get_path_to_platform_dir() + + if os.path.isfile(os.path.join(platform_path, device_info.PLATFORM_JSON_FILE)): + json_file = os.path.join(platform_path, device_info.PLATFORM_JSON_FILE) + + try: + with open(json_file, 'r') as file: + platform_data = json.load(file) + except (json.JSONDecodeError, IOError, TypeError, ValueError): + print("No platform.json") + return None + + dpus = platform_data.get('DPUS', None) + if dpus is None: + print("No DPUs in platform.json") + return None + + if tty is None: + dev = dpus[dpu]["serial-console"]["device"] + else: + # overwrite tty device in platform.json + dev = tty + + if baud is None: + baud = dpus[dpu]["serial-console"]["baud-rate"] + return dev, baud + + +def main(): + + parser = argparse.ArgumentParser(description='DPU TTY Console Utility') + parser.add_argument('-n', '--name', required=True) + parser.add_argument('-t', '--tty') + parser.add_argument('-b', '--baud') + args = parser.parse_args() + + dpu_tty, dpu_baud = get_dpu_tty(args.name, args.tty, args.baud) + # Use UART console utility for error checking of dpu_tty and dpu_baud. 
+ + p = subprocess.run([UART_CON, '-b', dpu_baud, '/dev/%s' % dpu_tty]) + if p.returncode: + print('{} failed'.format(p.args)) + if p.stdout: + print(p.stdout) + if p.stderr: + print(p.stderr) + return p.returncode + + +if __name__ == "__main__": + exit(main()) diff --git a/scripts/ecnconfig b/scripts/ecnconfig index e3b08d2bd3..9b2deab4dc 100755 --- a/scripts/ecnconfig +++ b/scripts/ecnconfig @@ -5,7 +5,7 @@ ecnconfig is the utility to 1) show and change ECN configuration -usage: ecnconfig [-h] [-v] [-l] [-p PROFILE] [-gmin GREEN_MIN] +usage: ecnconfig [-h] [-v] [-l] [-p PROFILE] [-gmin GREEN_MIN] [-n NAMESPACE] [-gmax GREEN_MAX] [-ymin YELLOW_MIN] [-ymax YELLOW_MAX] [-rmin RED_MIN] [-rmax RED_MAX] [-gdrop GREEN_DROP_PROB] [-ydrop YELLOW_DROP_PROB] [-rdrop RED_DROP_PROB] [-vv] @@ -16,6 +16,7 @@ optional arguments: -vv --verbose verbose output -l --list show ECN WRED configuration -p --profile specify WRED profile name + -n --namespace show ECN configuration for specified namespace -gmin --green-min set min threshold for packets marked green -gmax --green-max set max threshold for packets marked green -ymin --yellow-min set min threshold for packets marked yellow @@ -47,7 +48,7 @@ $ecnconfig -q 3 ECN status: queue 3: on """ -import argparse +import click import json import os import sys @@ -62,12 +63,17 @@ try: sys.path.insert(0, modules_path) sys.path.insert(0, tests_path) import mock_tables.dbconnector - + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import mock_tables.mock_multi_asic + mock_tables.dbconnector.load_namespace_config() except KeyError: pass from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector +from sonic_py_common import multi_asic +from utilities_common import multi_asic as multi_asic_util +from utilities_common.general import load_db_config WRED_PROFILE_TABLE_NAME = "WRED_PROFILE" WRED_CONFIG_FIELDS = { @@ -82,7 +88,6 @@ WRED_CONFIG_FIELDS = { "rdrop": "red_drop_probability" } -PORT_TABLE_NAME = "PORT" 
QUEUE_TABLE_NAME = "QUEUE" DEVICE_NEIGHBOR_TABLE_NAME = "DEVICE_NEIGHBOR" FIELD = "wred_profile" @@ -96,18 +101,25 @@ class EcnConfig(object): """ Process ecnconfig """ - def __init__(self, filename, verbose): + def __init__(self, test_filename, verbose, namespace): self.ports = [] self.queues = [] - self.filename = filename self.verbose = verbose + self.namespace = namespace + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.config_db = None + self.num_wred_profiles = 0 - # Set up db connections - self.db = ConfigDBConnector() - self.db.connect() + # For unit testing + self.test_filename = test_filename + self.updated_profile_tables = {} + @multi_asic_util.run_on_multi_asic def list(self): - wred_profiles = self.db.get_table(WRED_PROFILE_TABLE_NAME) + """ + List all WRED profiles. + """ + wred_profiles = self.config_db.get_table(WRED_PROFILE_TABLE_NAME) for name, data in wred_profiles.items(): profile_name = name profile_data = data @@ -117,12 +129,18 @@ class EcnConfig(object): line = [field, value] config.append(line) print(tabulate(config) + "\n") - if self.verbose: - print("Total profiles: %d" % len(wred_profiles)) + self.num_wred_profiles += len(wred_profiles) - # get parameters of a WRED profile def get_profile_data(self, profile): - wred_profiles = self.db.get_table(WRED_PROFILE_TABLE_NAME) + """ + Get parameters of a WRED profile + """ + if self.namespace or not multi_asic.is_multi_asic(): + db = ConfigDBConnector(namespace=self.namespace) + db.connect() + wred_profiles = db.get_table(WRED_PROFILE_TABLE_NAME) + else: + wred_profiles = multi_asic.get_table(WRED_PROFILE_TABLE_NAME) for profile_name, profile_data in wred_profiles.items(): if profile_name == profile: @@ -131,6 +149,9 @@ class EcnConfig(object): return None def validate_profile_data(self, profile_data): + """ + Validate threshold, probability and color values. 
+ """ result = True # check if thresholds are non-negative integers @@ -168,73 +189,116 @@ class EcnConfig(object): return result + @multi_asic_util.run_on_multi_asic def set_wred_threshold(self, profile, threshold, value): + """ + Single asic behaviour: + Set threshold value on default namespace + + Multi asic behaviour: + Set threshold value on the specified namespace. + If no namespace is provided, set on all namespaces. + """ chk_exec_privilege() + # Modify the threshold field = WRED_CONFIG_FIELDS[threshold] if self.verbose: - print("Setting %s value to %s" % (field, value)) - self.db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) - if self.filename is not None: - prof_table = self.db.get_table(WRED_PROFILE_TABLE_NAME) - with open(self.filename, "w") as fd: - json.dump(prof_table, fd) + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' + print("Setting %s value to %s%s" % (field, value, namespace_str)) + self.config_db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) + + # Record the change for unit testing + if self.test_filename: + profile_table = self.config_db.get_table(WRED_PROFILE_TABLE_NAME) + if self.multi_asic.current_namespace in self.updated_profile_tables.keys(): + self.updated_profile_tables[self.multi_asic.current_namespace][profile][threshold] = value + else: + self.updated_profile_tables[self.multi_asic.current_namespace] = profile_table + @multi_asic_util.run_on_multi_asic def set_wred_prob(self, profile, drop_color, value): + """ + Single asic behaviour: + Set drop probability on default namespace + + Multi asic behaviour: + Set drop probability value on the specified namespace. + If no namespace is provided, set on all namespaces. 
+ """ chk_exec_privilege() + # Modify the drop probability field = WRED_CONFIG_FIELDS[drop_color] if self.verbose: - print("Setting %s value to %s%%" % (field, value)) - self.db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) - if self.filename is not None: - prof_table = self.db.get_table(WRED_PROFILE_TABLE_NAME) - with open(self.filename, "w") as fd: - json.dump(prof_table, fd) + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' + print("Setting %s value to %s%%%s" % (field, value, namespace_str)) + self.config_db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) + + # Record the change for unit testing + if self.test_filename: + profile_table = self.config_db.get_table(WRED_PROFILE_TABLE_NAME) + if self.multi_asic.current_namespace in self.updated_profile_tables.keys(): + self.updated_profile_tables[self.multi_asic.current_namespace][profile][field] = value + else: + self.updated_profile_tables[self.multi_asic.current_namespace] = profile_table class EcnQ(object): """ Process ecn on/off on queues """ - def __init__(self, queues, filename, verbose): + def __init__(self, queues, test_filename, verbose, namespace): self.ports_key = [] self.queues = queues.split(',') - self.filename = filename self.verbose = verbose + self.namespace = namespace + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.config_db = None + self.db = None - # Set up db connections - self.config_db = ConfigDBConnector() - self.config_db.connect() - - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.CONFIG_DB) - - self.gen_ports_key() + # For unit testing + self.test_filename = test_filename + self.updated_q_table = {} def gen_ports_key(self): - if self.ports_key is not None: - port_table = self.config_db.get_table(DEVICE_NEIGHBOR_TABLE_NAME) - self.ports_key = list(port_table.keys()) + port_table = self.config_db.get_table(DEVICE_NEIGHBOR_TABLE_NAME) + 
self.ports_key = list(port_table.keys()) - # Verify at least one port is available - if len(self.ports_key) == 0: - raise Exception("No active ports detected in table '{}'".format(DEVICE_NEIGHBOR_TABLE_NAME)) + # Verify at least one port is available + if len(self.ports_key) == 0: + raise Exception("No active ports detected in table '{}'".format(DEVICE_NEIGHBOR_TABLE_NAME)) - # In multi-ASIC platforms backend ethernet ports are identified as - # 'Ethernet-BPxy'. Add 1024 to sort backend ports to the end. - self.ports_key.sort( - key = lambda k: int(k[8:]) if "BP" not in k else int(k[11:]) + 1024 - ) + # In multi-ASIC platforms backend ethernet ports are identified as + # 'Ethernet-BPxy'. Add 1024 to sort backend ports to the end. + self.ports_key.sort( + key = lambda k: int(k[8:]) if "BP" not in k else int(k[11:]) + 1024 + ) def dump_table_info(self): - if self.filename is not None: + """ + A function to dump updated queue tables. + These JSON dumps are used exclusively by unit tests. + The tables are organized by namespaces for multi-asic support. + """ + if self.test_filename is not None: q_table = self.config_db.get_table(QUEUE_TABLE_NAME) - with open(self.filename, "w") as fd: - json.dump({repr(x):y for x, y in q_table.items()}, fd) + with open(self.test_filename, "w") as fd: + self.updated_q_table[self.multi_asic.current_namespace] = {repr(x):y for x, y in q_table.items()} + json.dump(self.updated_q_table, fd) + @multi_asic_util.run_on_multi_asic def set(self, enable): + """ + Single asic behaviour: + Enable or disable queues on default namespace + + Multi asic behaviour: + Enable or disable queues on a specified namespace. + If no namespace is provided, set on all namespaces. 
+ """ chk_exec_privilege() + self.gen_ports_key() for queue in self.queues: if self.verbose: print("%s ECN on %s queue %s" % ("Enable" if enable else "Disable", ','.join(self.ports_key), queue)) @@ -252,10 +316,24 @@ class EcnQ(object): self.config_db.mod_entry(QUEUE_TABLE_NAME, key, None) else: self.config_db.set_entry(QUEUE_TABLE_NAME, key, entry) + # For unit testing self.dump_table_info() + @multi_asic_util.run_on_multi_asic def get(self): - print("ECN status:") + """ + Single asic behaviour: + Get status of queues on default namespace + + Multi asic behaviour: + Get status of queues on a specified namespace. + If no namespace is provided, get queue status on all namespaces. + """ + self.gen_ports_key() + namespace = self.multi_asic.current_namespace + namespace_str = f" for namespace {namespace}" if namespace else '' + print(f"ECN status{namespace_str}:") + for queue in self.queues: out = ' '.join(['queue', queue]) if self.verbose: @@ -270,81 +348,77 @@ class EcnQ(object): print("%s: on" % (out)) else: print("%s: off" % (out)) + # For unit testing self.dump_table_info() -def main(): - parser = argparse.ArgumentParser(description='Show and change:\n' - '1) ECN WRED configuration\n' - '2) ECN on/off status on queues', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show ECN WRED configuration') - parser.add_argument('-p', '--profile', type=str, help='specify WRED profile name', default=None) - parser.add_argument('-gmin', '--green-min', type=str, help='set min threshold for packets marked \'green\'', default=None) - parser.add_argument('-gmax', '--green-max', type=str, help='set max threshold for packets marked \'green\'', default=None) - parser.add_argument('-ymin', '--yellow-min', type=str, help='set min threshold for packets marked \'yellow\'', default=None) - parser.add_argument('-ymax', '--yellow-max', type=str, help='set max threshold for packets marked \'yellow\'', default=None) - 
parser.add_argument('-rmin', '--red-min', type=str, help='set min threshold for packets marked \'red\'', default=None) - parser.add_argument('-rmax', '--red-max', type=str, help='set max threshold for packets marked \'red\'', default=None) - parser.add_argument('-gdrop', '--green-drop-prob', type=str, help='set max drop/mark probability for packets marked \'green\'', default=None) - parser.add_argument('-ydrop', '--yellow-drop-prob', type=str, help='set max drop/mark probability for packets marked \'yellow\'', default=None) - parser.add_argument('-rdrop', '--red-drop-prob', type=str, help='set max drop/mark probability for packets marked \'red\'', default=None) - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - parser.add_argument('-vv', '--verbose', action='store_true', help='Verbose output', default=False) - - parser.add_argument('command', nargs='?', choices=['on', 'off'], type=str, help='turn on/off ecn', default=None) - parser.add_argument('-q', '--queue', type=str, help='specify queue index list: 3,4', default=None) - parser.add_argument('-f', '--filename', help='file used by mock tests', type=str, default=None) - +@click.command(help='Show and change: ECN WRED configuration\nECN on/off status on queues') +@click.argument('command', type=click.Choice(['on', 'off'], case_sensitive=False), required=False, default=None) +@click.option('-l', '--list', 'show_config', is_flag=True, help='show ECN WRED configuration') +@click.option('-p', '--profile', type=str, help='specify WRED profile name', default=None) +@click.option('-gmin', '--green-min', type=str, help='set min threshold for packets marked \'green\'', default=None) +@click.option('-gmax', '--green-max', type=str, help='set max threshold for packets marked \'green\'', default=None) +@click.option('-ymin', '--yellow-min', type=str, help='set min threshold for packets marked \'yellow\'', default=None) +@click.option('-ymax', '--yellow-max', type=str, help='set max threshold 
for packets marked \'yellow\'', default=None) +@click.option('-rmin', '--red-min', type=str, help='set min threshold for packets marked \'red\'', default=None) +@click.option('-rmax', '--red-max', type=str, help='set max threshold for packets marked \'red\'', default=None) +@click.option('-gdrop', '--green-drop-prob', type=str, help='set max drop/mark probability for packets marked \'green\'', default=None) +@click.option('-ydrop', '--yellow-drop-prob', type=str, help='set max drop/mark probability for packets marked \'yellow\'', default=None) +@click.option('-rdrop', '--red-drop-prob', type=str, help='set max drop/mark probability for packets marked \'red\'', default=None) +@click.option('-n', '--namespace', type=click.Choice(multi_asic.get_namespace_list()), help='Namespace name or skip for all', default=None) +@click.option('-vv', '--verbose', is_flag=True, help='Verbose output', default=False) +@click.option('-q', '--queue', type=str, help='specify queue index list: 3,4', default=None) +@click.version_option(version='1.0') +def main(command, show_config, profile, green_min, + green_max, yellow_min, yellow_max, red_min, + red_max, green_drop_prob, yellow_drop_prob, + red_drop_prob, namespace, verbose, queue): + test_filename = None if os.environ.get("UTILITIES_UNIT_TESTING", "0") == "2": - sys.argv.extend(['-f', '/tmp/ecnconfig']) - - args = parser.parse_args() + test_filename = '/tmp/ecnconfig' try: - if args.list or args.profile: - prof_cfg = EcnConfig(args.filename, args.verbose) - if args.list: - arg_len_max = 2 - if args.verbose: - arg_len_max += 1 - if args.filename: - arg_len_max += 2 - if len(sys.argv) > arg_len_max: + load_db_config() + if show_config or profile: + # Check if a set option has been provided + setOption = (green_min or green_max or yellow_min or yellow_max or red_min or red_max + or green_drop_prob or yellow_drop_prob or red_drop_prob) + + prof_cfg = EcnConfig(test_filename, verbose, namespace) + if show_config: + if setOption: raise 
Exception("Input arguments error. No set options allowed when -l[ist] specified") + prof_cfg.list() - elif args.profile: - arg_len_min = 4 - if args.verbose: - arg_len_min += 1 - if args.filename: - arg_len_min += 2 - if len(sys.argv) < arg_len_min: + if verbose: + print("Total profiles: %d" % prof_cfg.num_wred_profiles) + + elif profile: + if not setOption: raise Exception("Input arguments error. Specify at least one threshold parameter to set") # get current configuration data - wred_profile_data = prof_cfg.get_profile_data(args.profile) + wred_profile_data = prof_cfg.get_profile_data(profile) if wred_profile_data is None: - raise Exception("Input arguments error. Invalid WRED profile %s" % (args.profile)) - - if args.green_max: - wred_profile_data[WRED_CONFIG_FIELDS["gmax"]] = args.green_max - if args.green_min: - wred_profile_data[WRED_CONFIG_FIELDS["gmin"]] = args.green_min - if args.yellow_max: - wred_profile_data[WRED_CONFIG_FIELDS["ymax"]] = args.yellow_max - if args.yellow_min: - wred_profile_data[WRED_CONFIG_FIELDS["ymin"]] = args.yellow_min - if args.red_max: - wred_profile_data[WRED_CONFIG_FIELDS["rmax"]] = args.red_max - if args.red_min: - wred_profile_data[WRED_CONFIG_FIELDS["rmin"]] = args.red_min - if args.green_drop_prob: - wred_profile_data[WRED_CONFIG_FIELDS["gdrop"]] = args.green_drop_prob - if args.yellow_drop_prob: - wred_profile_data[WRED_CONFIG_FIELDS["ydrop"]] = args.yellow_drop_prob - if args.red_drop_prob: - wred_profile_data[WRED_CONFIG_FIELDS["rdrop"]] = args.red_drop_prob + raise Exception("Input arguments error. 
Invalid WRED profile %s for namespace %s" % (profile, namespace)) + + if green_max: + wred_profile_data[WRED_CONFIG_FIELDS["gmax"]] = green_max + if green_min: + wred_profile_data[WRED_CONFIG_FIELDS["gmin"]] = green_min + if yellow_max: + wred_profile_data[WRED_CONFIG_FIELDS["ymax"]] = yellow_max + if yellow_min: + wred_profile_data[WRED_CONFIG_FIELDS["ymin"]] = yellow_min + if red_max: + wred_profile_data[WRED_CONFIG_FIELDS["rmax"]] = red_max + if red_min: + wred_profile_data[WRED_CONFIG_FIELDS["rmin"]] = red_min + if green_drop_prob: + wred_profile_data[WRED_CONFIG_FIELDS["gdrop"]] = green_drop_prob + if yellow_drop_prob: + wred_profile_data[WRED_CONFIG_FIELDS["ydrop"]] = yellow_drop_prob + if red_drop_prob: + wred_profile_data[WRED_CONFIG_FIELDS["rdrop"]] = red_drop_prob # validate new configuration data if prof_cfg.validate_profile_data(wred_profile_data) == False: @@ -352,41 +426,39 @@ def main(): # apply new configuration # the following parameters can be combined in one run - if args.green_max: - prof_cfg.set_wred_threshold(args.profile, "gmax", args.green_max) - if args.green_min: - prof_cfg.set_wred_threshold(args.profile, "gmin", args.green_min) - if args.yellow_max: - prof_cfg.set_wred_threshold(args.profile, "ymax", args.yellow_max) - if args.yellow_min: - prof_cfg.set_wred_threshold(args.profile, "ymin", args.yellow_min) - if args.red_max: - prof_cfg.set_wred_threshold(args.profile, "rmax", args.red_max) - if args.red_min: - prof_cfg.set_wred_threshold(args.profile, "rmin", args.red_min) - if args.green_drop_prob: - prof_cfg.set_wred_prob(args.profile, "gdrop", args.green_drop_prob) - if args.yellow_drop_prob: - prof_cfg.set_wred_prob(args.profile, "ydrop", args.yellow_drop_prob) - if args.red_drop_prob: - prof_cfg.set_wred_prob(args.profile, "rdrop", args.red_drop_prob) - - elif args.queue: - arg_len_min = 3 - if args.filename: - arg_len_min += 1 - if args.verbose: - arg_len_min += 1 - if len(sys.argv) < arg_len_min: + if green_max: + 
prof_cfg.set_wred_threshold(profile, "gmax", green_max) + if green_min: + prof_cfg.set_wred_threshold(profile, "gmin", green_min) + if yellow_max: + prof_cfg.set_wred_threshold(profile, "ymax", yellow_max) + if yellow_min: + prof_cfg.set_wred_threshold(profile, "ymin", yellow_min) + if red_max: + prof_cfg.set_wred_threshold(profile, "rmax", red_max) + if red_min: + prof_cfg.set_wred_threshold(profile, "rmin", red_min) + if green_drop_prob: + prof_cfg.set_wred_prob(profile, "gdrop", green_drop_prob) + if yellow_drop_prob: + prof_cfg.set_wred_prob(profile, "ydrop", yellow_drop_prob) + if red_drop_prob: + prof_cfg.set_wred_prob(profile, "rdrop", red_drop_prob) + + # Dump the current config in the file for unit tests + if test_filename: + with open(test_filename, "w") as fd: + json.dump(prof_cfg.updated_profile_tables, fd) + + elif queue: + if queue.split(',') == ['']: raise Exception("Input arguments error. Specify at least one queue by index") - - q_ecn = EcnQ(args.queue, args.filename, args.verbose) - if not args.command: + q_ecn = EcnQ(queue, test_filename, verbose, namespace) + if command is None: q_ecn.get() else: - q_ecn.set(enable = True if args.command == 'on' else False) + q_ecn.set(enable = True if command == 'on' else False) else: - parser.print_help() sys.exit(1) except Exception as e: diff --git a/scripts/fast-reboot b/scripts/fast-reboot index e183c34219..aef71d6cd6 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -304,17 +304,23 @@ function check_mirror_session_acls() ACL_ND="missing" start_time=${SECONDS} elapsed_time=$((${SECONDS} - ${start_time})) + retry_count=0 while [[ ${elapsed_time} -lt 10 ]]; do CHECK_ACL_ENTRIES=0 + retry_count=$((retry_count + 1)) ACL_OUTPUT=$(sonic-db-cli ASIC_DB KEYS "*" | grep SAI_OBJECT_TYPE_ACL_ENTRY) || CHECK_ACL_ENTRIES=$? 
if [[ ${CHECK_ACL_ENTRIES} -ne 0 ]]; then - error "Failed to retrieve SAI_OBJECT_TYPE_ACL_ENTRY from redis" - exit ${EXIT_NO_MIRROR_SESSION_ACLS} + debug "Failed to retrieve SAI_OBJECT_TYPE_ACL_ENTRY from redis, retrying... (Attempt: ${retry_count})" + sleep 0.1 + elapsed_time=$((${SECONDS} - ${start_time})) + continue fi ACL_ENTRIES=( ${ACL_OUTPUT} ) if [[ ${#ACL_ENTRIES[@]} -eq 0 ]]; then - error "NO SAI_OBJECT_TYPE_ACL_ENTRY objects found" - exit ${EXIT_NO_MIRROR_SESSION_ACLS} + debug "NO SAI_OBJECT_TYPE_ACL_ENTRY objects found, retrying... (Attempt: ${retry_count})" + sleep 0.1 + elapsed_time=$((${SECONDS} - ${start_time})) + continue fi for ACL_ENTRY in ${ACL_ENTRIES[@]}; do ACL_PRIORITY=$(sonic-db-cli ASIC_DB HGET ${ACL_ENTRY} SAI_ACL_ENTRY_ATTR_PRIORITY) @@ -332,7 +338,7 @@ function check_mirror_session_acls() elapsed_time=$((${SECONDS} - ${start_time})) done if [[ "${ACL_ARP}" != "found" || "${ACL_ND}" != "found" ]]; then - debug "Failed to program mirror session ACLs on ASIC. ACLs: ARP=${ACL_ARP} ND=${ACL_ND}" + error "Failed to program mirror session ACLs on ASIC. ACLs: ARP=${ACL_ARP} ND=${ACL_ND}" exit ${EXIT_NO_MIRROR_SESSION_ACLS} fi debug "Mirror session ACLs (arp, nd) programmed to ASIC successfully" @@ -673,7 +679,7 @@ if is_secureboot && grep -q aboot_machine= /host/machine.conf; then else # check if secure boot is enable in UEFI CHECK_SECURE_UPGRADE_ENABLED=0 - SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled") || CHECK_SECURE_UPGRADE_ENABLED=$? + SECURE_UPGRADE_ENABLED=$(mokutil --sb-state 2>/dev/null | grep -c "enabled") || CHECK_SECURE_UPGRADE_ENABLED=$? 
if [[ CHECK_SECURE_UPGRADE_ENABLED -ne 0 ]]; then debug "Loading kernel without secure boot" load_kernel diff --git a/scripts/generate_dump b/scripts/generate_dump index 3d0ef3430d..38774c4a37 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1209,6 +1209,16 @@ collect_mellanox() { local timeout_cmd="timeout --foreground ${TIMEOUT_MIN}m" local sai_dump_folder="/tmp/saisdkdump" local sai_dump_filename="${sai_dump_folder}/sai_sdk_dump_$(date +"%m_%d_%Y_%I_%M_%p")" + local platform=$(python3 -c "from sonic_py_common import device_info; print(device_info.get_platform())") + local platform_folder="/usr/share/sonic/device/${platform}" + local hwsku=$(python3 -c "from sonic_py_common import device_info; print(device_info.get_hwsku())") + local sku_folder="/usr/share/sonic/device/${platform}/${hwsku}" + local cmis_host_mgmt_files=( + "/tmp/nv-syncd-shared/sai.profile" + "${sku_folder}/pmon_daemon_control.json" + "${sku_folder}/media_settings.json" + "${sku_folder}/optics_si_settings.json" + ) if [[ "$( docker container inspect -f '{{.State.Running}}' syncd )" == "true" ]]; then if [[ x"$(sonic-db-cli APPL_DB EXISTS PORT_TABLE:PortInitDone)" == x"1" ]]; then @@ -1251,6 +1261,21 @@ collect_mellanox() { fi save_cmd "get_component_versions.py" "component_versions" + + # Save CMIS-host-management related files + local cmis_host_mgmt_path="cmis-host-mgmt" + + for file in "${cmis_host_mgmt_files[@]}"; do + if [[ -f "${file}" ]]; then + ${CMD_PREFIX}save_file "${file}" "$cmis_host_mgmt_path" false true + fi + done + + if [[ ! 
-f "${sku_folder}/pmon_daemon_control.json" && -f "${platform_folder}/pmon_daemon_control.json" ]]; then + ${CMD_PREFIX}save_file "${platform_folder}/pmon_daemon_control.json" "$cmis_host_mgmt_path" false true + fi + + save_cmd "show interfaces autoneg status" "autoneg.status" } ############################################################################### diff --git a/scripts/mmuconfig b/scripts/mmuconfig index ebeb74fdaf..3986f3ba1b 100755 --- a/scripts/mmuconfig +++ b/scripts/mmuconfig @@ -18,17 +18,23 @@ optional arguments: import os import sys -import argparse +import click import tabulate import traceback import json +from utilities_common.general import load_db_config +from sonic_py_common import multi_asic +from utilities_common import multi_asic as multi_asic_util BUFFER_POOL_TABLE_NAME = "BUFFER_POOL" BUFFER_PROFILE_TABLE_NAME = "BUFFER_PROFILE" DEFAULT_LOSSLESS_BUFFER_PARAMETER_NAME = "DEFAULT_LOSSLESS_BUFFER_PARAMETER" DYNAMIC_THRESHOLD = "dynamic_th" +DYNAMIC_THRESHOLD_MIN = -8 +DYNAMIC_THRESHOLD_MAX = 8 STATIC_THRESHOLD = "static_th" +STATIC_THRESHOLD_MIN = 0 BUFFER_PROFILE_FIELDS = { "alpha": DYNAMIC_THRESHOLD, "staticth" : STATIC_THRESHOLD @@ -42,6 +48,11 @@ try: sys.path.insert(0, modules_path) sys.path.insert(0, tests_path) import mock_tables.dbconnector + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import mock_tables.mock_multi_asic + mock_tables.dbconnector.load_namespace_config() + else: + mock_tables.dbconnector.load_database_config() except KeyError: pass @@ -49,22 +60,21 @@ except KeyError: from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector class MmuConfig(object): - def __init__(self, verbose, config, filename): + def __init__(self, verbose, config, filename, namespace): self.verbose = verbose self.config = config self.filename = filename + self.namespace = namespace + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.config_db = None + self.db = None - # Set up db 
connections - if self.config: - self.db = ConfigDBConnector() - self.db.connect() - else: - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.STATE_DB, False) + # For unit testing + self.updated_profile_table = {} def get_table(self, tablename): if self.config: - return self.db.get_table(tablename) + return self.config_db.get_table(tablename) entries = {} keys = self.db.keys(self.db.STATE_DB, tablename + '*') @@ -77,13 +87,15 @@ class MmuConfig(object): return entries + @multi_asic_util.run_on_multi_asic def list(self): + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' lossless_traffic_pattern = self.get_table(DEFAULT_LOSSLESS_BUFFER_PARAMETER_NAME) if lossless_traffic_pattern: for _, pattern in lossless_traffic_pattern.items(): config = [] - print("Lossless traffic pattern:") + print(f"Lossless traffic pattern{namespace_str}:") for field, value in pattern.items(): config.append([field, value]) print(tabulate.tabulate(config) + "\n") @@ -93,97 +105,88 @@ class MmuConfig(object): for pool_name, pool_data in buf_pools.items(): config = [] - print("Pool: " + pool_name) + print(f"Pool{namespace_str}: " + pool_name) for field, value in pool_data.items(): config.append([field, value]) print(tabulate.tabulate(config) + "\n") if self.verbose: print("Total pools: %d\n\n" % len(buf_pools)) else: - print("No buffer pool information available") + print(f"No buffer pool information available{namespace_str}") buf_profs = self.get_table(BUFFER_PROFILE_TABLE_NAME) if buf_profs: for prof_name, prof_data in buf_profs.items(): config = [] - print("Profile: " + prof_name) + print(f"Profile{namespace_str}: " + prof_name) for field, value in prof_data.items(): config.append([field, value]) print(tabulate.tabulate(config) + "\n") if self.verbose: print("Total profiles: %d" % len(buf_profs)) else: - print("No buffer profile information available") + print(f"No buffer profile information 
available{namespace_str}") + @multi_asic_util.run_on_multi_asic def set(self, profile, field_alias, value): + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' if os.geteuid() != 0 and os.environ.get("UTILITIES_UNIT_TESTING", "0") != "2": sys.exit("Root privileges required for this operation") field = BUFFER_PROFILE_FIELDS[field_alias] - buf_profs = self.db.get_table(BUFFER_PROFILE_TABLE_NAME) - v = int(value) + buf_profs = self.config_db.get_table(BUFFER_PROFILE_TABLE_NAME) if field == DYNAMIC_THRESHOLD: - if v < -8 or v > 8: - sys.exit("Invalid alpha value: 2^(%s)" % (value)) - if profile in buf_profs and DYNAMIC_THRESHOLD not in buf_profs[profile]: sys.exit("%s not using dynamic thresholding" % (profile)) elif field == STATIC_THRESHOLD: - if v < 0: - sys.exit("Invalid static threshold value: (%s)" % (value)) - if profile in buf_profs and STATIC_THRESHOLD not in buf_profs[profile]: sys.exit("%s not using static threshold" % (profile)) else: sys.exit("Set field %s not supported" % (field)) if self.verbose: - print("Setting %s %s value to %s" % (profile, field, value)) - self.db.mod_entry(BUFFER_PROFILE_TABLE_NAME, profile, {field: value}) + print("Setting %s %s value to %s%s" % (profile, field, value, namespace_str)) + self.config_db.mod_entry(BUFFER_PROFILE_TABLE_NAME, profile, {field: value}) if self.filename is not None: - prof_table = self.db.get_table(BUFFER_PROFILE_TABLE_NAME) + self.updated_profile_table[self.multi_asic.current_namespace] = self.config_db.get_table(BUFFER_PROFILE_TABLE_NAME) with open(self.filename, "w") as fd: - json.dump(prof_table, fd) - - -def main(config): - if config: - parser = argparse.ArgumentParser(description='Show and change: mmu configuration', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show mmu configuration') - parser.add_argument('-p', '--profile', type=str, help='specify buffer profile name', 
default=None) - parser.add_argument('-a', '--alpha', type=str, help='set n for dyanmic threshold alpha 2^(n)', default=None) - parser.add_argument('-s', '--staticth', type=str, help='set static threshold', default=None) - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - else: - parser = argparse.ArgumentParser(description='Show buffer state', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show buffer state') - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - - parser.add_argument('-vv', '--verbose', action='store_true', help='verbose output', default=False) - parser.add_argument('-f', '--filename', help='file used by mock tests', type=str, default=None) - + json.dump(self.updated_profile_table, fd) + +@click.command(help='Show and change: mmu configuration') +@click.option('-l', '--list', 'show_config', is_flag=True, help='show mmu configuration') +@click.option('-p', '--profile', type=str, help='specify buffer profile name', default=None) +@click.option('-a', '--alpha', type=click.IntRange(DYNAMIC_THRESHOLD_MIN, DYNAMIC_THRESHOLD_MAX), help='set n for dyanmic threshold alpha 2^(n)', default=None) +@click.option('-s', '--staticth', type=click.IntRange(min=STATIC_THRESHOLD_MIN), help='set static threshold', default=None) +@click.option('-n', '--namespace', type=click.Choice(multi_asic.get_namespace_list()), help='Namespace name or skip for all', default=None) +@click.option('-vv', '--verbose', is_flag=True, help='verbose output', default=False) +@click.version_option(version='1.0') +def main(show_config, profile, alpha, staticth, namespace, verbose): + # A test file created for unit test purposes + filename=None if os.environ.get("UTILITIES_UNIT_TESTING", "0") == "2": - sys.argv.extend(['-f', '/tmp/mmuconfig']) + filename = '/tmp/mmuconfig' - - args = parser.parse_args() + # Buffershow and mmuconfig cmds share this script + # 
Buffershow cmd cannot modify configs hence config is set to False + config = True if sys.argv[0].split('/')[-1] == "mmuconfig" else False try: - mmu_cfg = MmuConfig(args.verbose, config, args.filename) - if args.list: + load_db_config() + mmu_cfg = MmuConfig(verbose, config, filename, namespace) + + # Both mmuconfig and buffershow have access to show_config option + if show_config: mmu_cfg.list() - elif config and args.profile: - if args.alpha: - mmu_cfg.set(args.profile, "alpha", args.alpha) - elif args.staticth: - mmu_cfg.set(args.profile, "staticth", args.staticth) + # Buffershow cannot modify profiles + elif config and profile: + if alpha: + mmu_cfg.set(profile, "alpha", alpha) + elif staticth: + mmu_cfg.set(profile, "staticth", staticth) else: - parser.print_help() + ctx = click.get_current_context() + click.echo(ctx.get_help()) sys.exit(1) except Exception as e: @@ -192,7 +195,4 @@ def main(config): sys.exit(1) if __name__ == "__main__": - if sys.argv[0].split('/')[-1] == "mmuconfig": - main(True) - else: - main(False) + main() diff --git a/scripts/portstat b/scripts/portstat index 6294ba57a9..58cc9aefd6 100755 --- a/scripts/portstat +++ b/scripts/portstat @@ -8,16 +8,10 @@ import json import argparse -import datetime import os.path import sys import time -from collections import OrderedDict, namedtuple - -from natsort import natsorted -from tabulate import tabulate -from sonic_py_common import multi_asic -from sonic_py_common import device_info +from collections import OrderedDict # mock the redis for unit test purposes # try: @@ -27,6 +21,13 @@ try: sys.path.insert(0, modules_path) sys.path.insert(0, tests_path) import mock_tables.dbconnector + + if os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] == "1": + import mock + import sonic_py_common + from swsscommon.swsscommon import SonicV2Connector + sonic_py_common.device_info.is_supervisor = mock.MagicMock(return_value=True) + SonicV2Connector.delete_all_by_pattern = mock.MagicMock() if 
os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": import mock_tables.mock_multi_asic mock_tables.dbconnector.load_namespace_config() @@ -34,530 +35,11 @@ try: except KeyError: pass -from swsscommon.swsscommon import CounterTable, PortCounter from utilities_common import constants from utilities_common.intf_filter import parse_interface_in_filter -import utilities_common.multi_asic as multi_asic_util -from utilities_common.netstat import ns_diff, table_as_json, format_brate, format_prate, format_util, format_number_with_comma from utilities_common.cli import json_serial, UserCache - -""" -The order and count of statistics mentioned below needs to be in sync with the values in portstat script -So, any fields added/deleted in here should be reflected in portstat script also -""" -NStats = namedtuple("NStats", "rx_ok, rx_err, rx_drop, rx_ovr, tx_ok,\ - tx_err, tx_drop, tx_ovr, rx_byt, tx_byt,\ - rx_64, rx_65_127, rx_128_255, rx_256_511, rx_512_1023, rx_1024_1518, rx_1519_2047, rx_2048_4095, rx_4096_9216, rx_9217_16383,\ - rx_uca, rx_mca, rx_bca, rx_all,\ - tx_64, tx_65_127, tx_128_255, tx_256_511, tx_512_1023, tx_1024_1518, tx_1519_2047, tx_2048_4095, tx_4096_9216, tx_9217_16383,\ - tx_uca, tx_mca, tx_bca, tx_all,\ - rx_jbr, rx_frag, rx_usize, rx_ovrrun,\ - fec_corr, fec_uncorr, fec_symbol_err") -header_all = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', - 'TX_OK', 'TX_BPS', 'TX_PPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] -header_std = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', - 'TX_OK', 'TX_BPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] -header_errors_only = ['IFACE', 'STATE', 'RX_ERR', 'RX_DRP', 'RX_OVR', 'TX_ERR', 'TX_DRP', 'TX_OVR'] -header_fec_only = ['IFACE', 'STATE', 'FEC_CORR', 'FEC_UNCORR', 'FEC_SYMBOL_ERR'] -header_rates_only = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_OK', 'TX_BPS', 'TX_PPS', 'TX_UTIL'] - -rates_key_list = [ 'RX_BPS', 
'RX_PPS', 'RX_UTIL', 'TX_BPS', 'TX_PPS', 'TX_UTIL' ] -ratestat_fields = ("rx_bps", "rx_pps", "rx_util", "tx_bps", "tx_pps", "tx_util") -RateStats = namedtuple("RateStats", ratestat_fields) - -""" -The order and count of statistics mentioned below needs to be in sync with the values in portstat script -So, any fields added/deleted in here should be reflected in portstat script also -""" -BUCKET_NUM = 45 -counter_bucket_dict = { - 0:['SAI_PORT_STAT_IF_IN_UCAST_PKTS', 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS'], - 1:['SAI_PORT_STAT_IF_IN_ERRORS'], - 2:['SAI_PORT_STAT_IF_IN_DISCARDS'], - 3:['SAI_PORT_STAT_ETHER_RX_OVERSIZE_PKTS'], - 4:['SAI_PORT_STAT_IF_OUT_UCAST_PKTS', 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS'], - 5:['SAI_PORT_STAT_IF_OUT_ERRORS'], - 6:['SAI_PORT_STAT_IF_OUT_DISCARDS'], - 7:['SAI_PORT_STAT_ETHER_TX_OVERSIZE_PKTS'], - 8:['SAI_PORT_STAT_IF_IN_OCTETS'], - 9:['SAI_PORT_STAT_IF_OUT_OCTETS'], - 10:['SAI_PORT_STAT_ETHER_IN_PKTS_64_OCTETS'], - 11:['SAI_PORT_STAT_ETHER_IN_PKTS_65_TO_127_OCTETS'], - 12:['SAI_PORT_STAT_ETHER_IN_PKTS_128_TO_255_OCTETS'], - 13:['SAI_PORT_STAT_ETHER_IN_PKTS_256_TO_511_OCTETS'], - 14:['SAI_PORT_STAT_ETHER_IN_PKTS_512_TO_1023_OCTETS'], - 15:['SAI_PORT_STAT_ETHER_IN_PKTS_1024_TO_1518_OCTETS'], - 16:['SAI_PORT_STAT_ETHER_IN_PKTS_1519_TO_2047_OCTETS'], - 17:['SAI_PORT_STAT_ETHER_IN_PKTS_2048_TO_4095_OCTETS'], - 18:['SAI_PORT_STAT_ETHER_IN_PKTS_4096_TO_9216_OCTETS'], - 19:['SAI_PORT_STAT_ETHER_IN_PKTS_9217_TO_16383_OCTETS'], - 20:['SAI_PORT_STAT_IF_IN_UCAST_PKTS'], - 21:['SAI_PORT_STAT_IF_IN_MULTICAST_PKTS'], - 22:['SAI_PORT_STAT_IF_IN_BROADCAST_PKTS'], - 23:['SAI_PORT_STAT_IF_IN_UCAST_PKTS', 'SAI_PORT_STAT_IF_IN_MULTICAST_PKTS', 'SAI_PORT_STAT_IF_IN_BROADCAST_PKTS'], - 24:['SAI_PORT_STAT_ETHER_OUT_PKTS_64_OCTETS'], - 25:['SAI_PORT_STAT_ETHER_OUT_PKTS_65_TO_127_OCTETS'], - 26:['SAI_PORT_STAT_ETHER_OUT_PKTS_128_TO_255_OCTETS'], - 27:['SAI_PORT_STAT_ETHER_OUT_PKTS_256_TO_511_OCTETS'], - 28:['SAI_PORT_STAT_ETHER_OUT_PKTS_512_TO_1023_OCTETS'], - 
29:['SAI_PORT_STAT_ETHER_OUT_PKTS_1024_TO_1518_OCTETS'], - 30:['SAI_PORT_STAT_ETHER_OUT_PKTS_1519_TO_2047_OCTETS'], - 31:['SAI_PORT_STAT_ETHER_OUT_PKTS_2048_TO_4095_OCTETS'], - 32:['SAI_PORT_STAT_ETHER_OUT_PKTS_4096_TO_9216_OCTETS'], - 33:['SAI_PORT_STAT_ETHER_OUT_PKTS_9217_TO_16383_OCTETS'], - 34:['SAI_PORT_STAT_IF_OUT_UCAST_PKTS'], - 35:['SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS'], - 36:['SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS'], - 37:['SAI_PORT_STAT_IF_OUT_UCAST_PKTS', 'SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS', 'SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS'], - 38:['SAI_PORT_STAT_ETHER_STATS_JABBERS'], - 39:['SAI_PORT_STAT_ETHER_STATS_FRAGMENTS'], - 40:['SAI_PORT_STAT_ETHER_STATS_UNDERSIZE_PKTS'], - 41:['SAI_PORT_STAT_IP_IN_RECEIVES'], - 42:['SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES'], - 43:['SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES'], - 44:['SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS'] -} - -STATUS_NA = 'N/A' - -RATES_TABLE_PREFIX = "RATES:" - -COUNTER_TABLE_PREFIX = "COUNTERS:" -COUNTERS_PORT_NAME_MAP = "COUNTERS_PORT_NAME_MAP" - -PORT_STATUS_TABLE_PREFIX = "PORT_TABLE:" -PORT_STATE_TABLE_PREFIX = "PORT_TABLE|" -PORT_OPER_STATUS_FIELD = "oper_status" -PORT_ADMIN_STATUS_FIELD = "admin_status" -PORT_STATUS_VALUE_UP = 'UP' -PORT_STATUS_VALUE_DOWN = 'DOWN' -PORT_SPEED_FIELD = "speed" - -PORT_STATE_UP = 'U' -PORT_STATE_DOWN = 'D' -PORT_STATE_DISABLED = 'X' - - -class Portstat(object): - def __init__(self, namespace, display_option): - self.db = None - self.multi_asic = multi_asic_util.MultiAsic(display_option, namespace) - - def get_cnstat_dict(self): - self.cnstat_dict = OrderedDict() - self.cnstat_dict['time'] = datetime.datetime.now() - self.ratestat_dict = OrderedDict() - self.collect_stat() - return self.cnstat_dict, self.ratestat_dict - - @multi_asic_util.run_on_multi_asic - def collect_stat(self): - """ - Collect the statisitics from all the asics present on the - device and store in a dict - """ - - cnstat_dict, ratestat_dict = self.get_cnstat() - 
self.cnstat_dict.update(cnstat_dict) - self.ratestat_dict.update(ratestat_dict) - - def get_cnstat(self): - """ - Get the counters info from database. - """ - def get_counters(port): - """ - Get the counters from specific table. - """ - fields = ["0"]*BUCKET_NUM - - _, fvs = counter_table.get(PortCounter(), port) - fvs = dict(fvs) - for pos, cntr_list in counter_bucket_dict.items(): - for counter_name in cntr_list: - if counter_name not in fvs: - fields[pos] = STATUS_NA - elif fields[pos] != STATUS_NA: - fields[pos] = str(int(fields[pos]) + int(fvs[counter_name])) - - cntr = NStats._make(fields)._asdict() - return cntr - - def get_rates(table_id): - """ - Get the rates from specific table. - """ - fields = ["0","0","0","0","0","0"] - for pos, name in enumerate(rates_key_list): - full_table_id = RATES_TABLE_PREFIX + table_id - counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, name) - if counter_data is None: - fields[pos] = STATUS_NA - elif fields[pos] != STATUS_NA: - fields[pos] = float(counter_data) - cntr = RateStats._make(fields) - return cntr - - # Get the info from database - counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP); - # Build a dictionary of the stats - cnstat_dict = OrderedDict() - cnstat_dict['time'] = datetime.datetime.now() - ratestat_dict = OrderedDict() - counter_table = CounterTable(self.db.get_redis_client(self.db.COUNTERS_DB)) - if counter_port_name_map is None: - return cnstat_dict, ratestat_dict - for port in natsorted(counter_port_name_map): - port_name = port.split(":")[0] - if self.multi_asic.skip_display(constants.PORT_OBJ, port_name): - continue - cnstat_dict[port] = get_counters(port) - ratestat_dict[port] = get_rates(counter_port_name_map[port]) - return cnstat_dict, ratestat_dict - - def get_port_speed(self, port_name): - """ - Get the port speed - """ - # Get speed from APPL_DB - state_db_table_id = PORT_STATE_TABLE_PREFIX + port_name - app_db_table_id = PORT_STATUS_TABLE_PREFIX + 
port_name - for ns in self.multi_asic.get_ns_list_based_on_options(): - self.db = multi_asic.connect_to_all_dbs_for_ns(ns) - speed = self.db.get(self.db.STATE_DB, state_db_table_id, PORT_SPEED_FIELD) - oper_status = self.db.get(self.db.APPL_DB, app_db_table_id, PORT_OPER_STATUS_FIELD) - if speed is None or speed == STATUS_NA or oper_status != "up": - speed = self.db.get(self.db.APPL_DB, app_db_table_id, PORT_SPEED_FIELD) - if speed is not None: - return int(speed) - return STATUS_NA - - def get_port_state(self, port_name): - """ - Get the port state - """ - full_table_id = PORT_STATUS_TABLE_PREFIX + port_name - for ns in self.multi_asic.get_ns_list_based_on_options(): - self.db = multi_asic.connect_to_all_dbs_for_ns(ns) - admin_state = self.db.get(self.db.APPL_DB, full_table_id, PORT_ADMIN_STATUS_FIELD) - oper_state = self.db.get(self.db.APPL_DB, full_table_id, PORT_OPER_STATUS_FIELD) - - if admin_state is None or oper_state is None: - continue - if admin_state.upper() == PORT_STATUS_VALUE_DOWN: - return PORT_STATE_DISABLED - elif admin_state.upper() == PORT_STATUS_VALUE_UP and oper_state.upper() == PORT_STATUS_VALUE_UP: - return PORT_STATE_UP - elif admin_state.upper() == PORT_STATUS_VALUE_UP and oper_state.upper() == PORT_STATUS_VALUE_DOWN: - return PORT_STATE_DOWN - else: - return STATUS_NA - return STATUS_NA - - - def cnstat_print(self, cnstat_dict, ratestat_dict, intf_list, use_json, print_all, errors_only, fec_stats_only, rates_only, detail=False): - """ - Print the cnstat. 
- """ - - if intf_list and detail: - self.cnstat_intf_diff_print(cnstat_dict, {}, intf_list) - return None - - table = [] - header = None - - for key, data in cnstat_dict.items(): - if key == 'time': - continue - if intf_list and key not in intf_list: - continue - port_speed = self.get_port_speed(key) - rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) - if print_all: - header = header_all - table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) - elif errors_only: - header = header_errors_only - table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) - elif fec_stats_only: - header = header_fec_only - table.append((key, self.get_port_state(key), - format_number_with_comma(data['fec_corr']), - format_number_with_comma(data['fec_uncorr']), - format_number_with_comma(data['fec_symbol_err']))) - elif rates_only: - header = header_rates_only - table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - 
format_util(rates.tx_bps, port_speed))) - else: - header = header_std - table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_ok']), - format_brate(rates.rx_bps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_ok']), - format_brate(rates.tx_bps), - format_util(rates.tx_bps, port_speed), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) - if table: - if use_json: - print(table_as_json(table, header)) - else: - print(tabulate(table, header, tablefmt='simple', stralign='right')) - if (multi_asic.is_multi_asic() or device_info.is_chassis()) and not use_json: - print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") - - def cnstat_intf_diff_print(self, cnstat_new_dict, cnstat_old_dict, intf_list): - """ - Print the difference between two cnstat results for interface. - """ - - for key, cntr in cnstat_new_dict.items(): - if key == 'time': - continue - - if key in cnstat_old_dict: - old_cntr = cnstat_old_dict.get(key) - else: - old_cntr = NStats._make([0] * BUCKET_NUM)._asdict() - - if intf_list and key not in intf_list: - continue - - print("Packets Received 64 Octets..................... {}".format(ns_diff(cntr['rx_64'], old_cntr['rx_64']))) - print("Packets Received 65-127 Octets................. {}".format(ns_diff(cntr['rx_65_127'], old_cntr['rx_65_127']))) - print("Packets Received 128-255 Octets................ {}".format(ns_diff(cntr['rx_128_255'], old_cntr['rx_128_255']))) - print("Packets Received 256-511 Octets................ {}".format(ns_diff(cntr['rx_256_511'], old_cntr['rx_256_511']))) - print("Packets Received 512-1023 Octets............... 
{}".format(ns_diff(cntr['rx_512_1023'], old_cntr['rx_512_1023']))) - print("Packets Received 1024-1518 Octets.............. {}".format(ns_diff(cntr['rx_1024_1518'], old_cntr['rx_1024_1518']))) - print("Packets Received 1519-2047 Octets.............. {}".format(ns_diff(cntr['rx_1519_2047'], old_cntr['rx_1519_2047']))) - print("Packets Received 2048-4095 Octets.............. {}".format(ns_diff(cntr['rx_2048_4095'], old_cntr['rx_2048_4095']))) - print("Packets Received 4096-9216 Octets.............. {}".format(ns_diff(cntr['rx_4096_9216'], old_cntr['rx_4096_9216']))) - print("Packets Received 9217-16383 Octets............. {}".format(ns_diff(cntr['rx_9217_16383'], old_cntr['rx_9217_16383']))) - - print("") - print("Total Packets Received Without Errors.......... {}".format(ns_diff(cntr['rx_all'], old_cntr['rx_all']))) - print("Unicast Packets Received....................... {}".format(ns_diff(cntr['rx_uca'], old_cntr['rx_uca']))) - print("Multicast Packets Received..................... {}".format(ns_diff(cntr['rx_mca'], old_cntr['rx_mca']))) - print("Broadcast Packets Received..................... {}".format(ns_diff(cntr['rx_bca'], old_cntr['rx_bca']))) - - print("") - print("Jabbers Received............................... {}".format(ns_diff(cntr['rx_jbr'], old_cntr['rx_jbr']))) - print("Fragments Received............................. {}".format(ns_diff(cntr['rx_frag'], old_cntr['rx_frag']))) - print("Undersize Received............................. {}".format(ns_diff(cntr['rx_usize'], old_cntr['rx_usize']))) - print("Overruns Received.............................. {}".format(ns_diff(cntr['rx_ovrrun'], old_cntr['rx_ovrrun']))) - - print("") - print("Packets Transmitted 64 Octets.................. {}".format(ns_diff(cntr['tx_64'], old_cntr['tx_64']))) - print("Packets Transmitted 65-127 Octets.............. {}".format(ns_diff(cntr['tx_65_127'], old_cntr['tx_65_127']))) - print("Packets Transmitted 128-255 Octets............. 
{}".format(ns_diff(cntr['tx_128_255'], old_cntr['tx_128_255']))) - print("Packets Transmitted 256-511 Octets............. {}".format(ns_diff(cntr['tx_256_511'], old_cntr['tx_256_511']))) - print("Packets Transmitted 512-1023 Octets............ {}".format(ns_diff(cntr['tx_512_1023'], old_cntr['tx_512_1023']))) - print("Packets Transmitted 1024-1518 Octets........... {}".format(ns_diff(cntr['tx_1024_1518'], old_cntr['tx_1024_1518']))) - print("Packets Transmitted 1519-2047 Octets........... {}".format(ns_diff(cntr['tx_1519_2047'], old_cntr['tx_1519_2047']))) - print("Packets Transmitted 2048-4095 Octets........... {}".format(ns_diff(cntr['tx_2048_4095'], old_cntr['tx_2048_4095']))) - print("Packets Transmitted 4096-9216 Octets........... {}".format(ns_diff(cntr['tx_4096_9216'], old_cntr['tx_4096_9216']))) - print("Packets Transmitted 9217-16383 Octets.......... {}".format(ns_diff(cntr['tx_9217_16383'], old_cntr['tx_9217_16383']))) - - print("") - print("Total Packets Transmitted Successfully......... {}".format(ns_diff(cntr['tx_all'], old_cntr['tx_all']))) - print("Unicast Packets Transmitted.................... {}".format(ns_diff(cntr['tx_uca'], old_cntr['tx_uca']))) - print("Multicast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_mca'], old_cntr['tx_mca']))) - print("Broadcast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_bca'], old_cntr['tx_bca']))) - - print("Time Since Counters Last Cleared............... " + str(cnstat_old_dict.get('time'))) - - - def cnstat_diff_print(self, cnstat_new_dict, cnstat_old_dict, - ratestat_dict, intf_list, use_json, - print_all, errors_only, fec_stats_only, - rates_only, detail=False): - """ - Print the difference between two cnstat results. 
- """ - - if intf_list and detail: - self.cnstat_intf_diff_print(cnstat_new_dict, cnstat_old_dict, intf_list) - return None - - table = [] - header = None - - for key, cntr in cnstat_new_dict.items(): - if key == 'time': - continue - old_cntr = None - if key in cnstat_old_dict: - old_cntr = cnstat_old_dict.get(key) - - rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(ratestat_fields))) - - if intf_list and key not in intf_list: - continue - port_speed = self.get_port_speed(key) - - if print_all: - header = header_all - if old_cntr is not None: - table.append((key, self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) - else: - table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) - elif errors_only: - header = header_errors_only - if old_cntr is not None: - table.append((key, self.get_port_state(key), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], 
old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) - else: - table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) - elif fec_stats_only: - header = header_fec_only - if old_cntr is not None: - table.append((key, self.get_port_state(key), - ns_diff(cntr['fec_corr'], old_cntr['fec_corr']), - ns_diff(cntr['fec_uncorr'], old_cntr['fec_uncorr']), - ns_diff(cntr['fec_symbol_err'], old_cntr['fec_symbol_err']))) - else: - table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['fec_corr']), - format_number_with_comma(cntr['fec_uncorr']), - format_number_with_comma(cntr['fec_symbol_err']))) - - elif rates_only: - header = header_rates_only - if old_cntr is not None: - table.append((key, - self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed))) - else: - table.append((key, - self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed))) - else: - header = header_std - if old_cntr is not None: - table.append((key, - self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), - format_brate(rates.rx_bps), - 
format_util(rates.rx_bps, port_speed), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), - format_brate(rates.tx_bps), - format_util(rates.tx_bps, port_speed), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) - else: - table.append((key, - self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), - format_brate(rates.rx_bps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_ok']), - format_brate(rates.tx_bps), - format_util(rates.tx_bps, port_speed), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) - if table: - if use_json: - print(table_as_json(table, header)) - else: - print(tabulate(table, header, tablefmt='simple', stralign='right')) - if (multi_asic.is_multi_asic() or device_info.is_chassis()) and not use_json: - print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") +from utilities_common.portstat import Portstat def main(): parser = argparse.ArgumentParser(description='Display the ports state and counters', diff --git a/scripts/queuestat b/scripts/queuestat index dd8c9d7e0c..3774ede6d9 100755 --- a/scripts/queuestat +++ b/scripts/queuestat @@ -7,7 +7,7 @@ ##################################################################### import json -import argparse +import click import datetime import os.path import sys @@ -102,23 +102,40 @@ def build_json(port, cnstat, voq=False): out.update(ports_stats(k)) return out +class QueuestatWrapper(object): + """A wrapper to execute queuestat cmd over the correct namespaces""" + def __init__(self, 
namespace, voq): + self.namespace = namespace + self.voq = voq -class Queuestat(object): - def __init__(self, namespace, voq=False): + # Initialize the multi-asic namespace + self.multi_asic = multi_asic_util.MultiAsic(constants.DISPLAY_ALL, namespace_option=namespace) self.db = None - self.multi_asic = multi_asic_util.MultiAsic(constants.DISPLAY_ALL, namespace) - if namespace is not None: - for ns in self.multi_asic.get_ns_list_based_on_options(): - self.db = multi_asic.connect_to_all_dbs_for_ns(ns) + + @multi_asic_util.run_on_multi_asic + def run(self, save_fresh_stats, port_to_show_stats, json_opt, non_zero): + queuestat = Queuestat(self.multi_asic.current_namespace, self.db, self.voq) + if save_fresh_stats: + queuestat.save_fresh_stats() + return + + if port_to_show_stats != None: + queuestat.get_print_port_stat(port_to_show_stats, json_opt, non_zero) else: - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.COUNTERS_DB) + queuestat.get_print_all_stat(json_opt, non_zero) + + +class Queuestat(object): + def __init__(self, namespace, db, voq=False): + self.db = db self.voq = voq + self.namespace = namespace + self.namespace_str = f" for {namespace}" if namespace else '' def get_queue_port(table_id): port_table_id = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_PORT_MAP, table_id) if port_table_id is None: - print("Port is not available!", table_id) + print(f"Port is not available{self.namespace_str}!", table_id) sys.exit(1) return port_table_id @@ -130,7 +147,7 @@ class Queuestat(object): self.counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) if self.counter_port_name_map is None: - print("COUNTERS_PORT_NAME_MAP is empty!") + print(f"COUNTERS_PORT_NAME_MAP is empty{self.namespace_str}!") sys.exit(1) self.port_queues_map = {} @@ -148,7 +165,7 @@ class Queuestat(object): counter_queue_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_QUEUE_NAME_MAP) if counter_queue_name_map is None: - 
print("COUNTERS_QUEUE_NAME_MAP is empty!") + print(f"COUNTERS_QUEUE_NAME_MAP is empty{self.namespace_str}!") sys.exit(1) for queue in counter_queue_name_map: @@ -166,7 +183,7 @@ class Queuestat(object): def get_queue_index(table_id): queue_index = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_INDEX_MAP, table_id) if queue_index is None: - print("Queue index is not available!", table_id) + print(f"Queue index is not available{self.namespace_str}!", table_id) sys.exit(1) return queue_index @@ -174,7 +191,7 @@ class Queuestat(object): def get_queue_type(table_id): queue_type = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_TYPE_MAP, table_id) if queue_type is None: - print("Queue Type is not available!", table_id) + print(f"Queue Type is not available{self.namespace_str}!", table_id) sys.exit(1) elif queue_type == SAI_QUEUE_TYPE_MULTICAST: return QUEUE_TYPE_MC @@ -185,7 +202,7 @@ class Queuestat(object): elif queue_type == SAI_QUEUE_TYPE_ALL: return QUEUE_TYPE_ALL else: - print("Queue Type is invalid:", table_id, queue_type) + print(f"Queue Type is invalid{self.namespace_str}:", table_id, queue_type) sys.exit(1) if self.voq: @@ -255,6 +272,7 @@ class Queuestat(object): else: hdr = voq_header if self.voq else header if table: + print(f"For namespace {self.namespace}:") print(tabulate(table, hdr, tablefmt='simple', stralign='right')) print() @@ -314,7 +332,7 @@ class Queuestat(object): else: hdr = voq_header if self.voq else header if table: - print(port + " Last cached time was " + str(cnstat_old_dict.get('time'))) + print(port + f" Last cached time{self.namespace_str} was " + str(cnstat_old_dict.get('time'))) print(tabulate(table, hdr, tablefmt='simple', stralign='right')) print() @@ -370,7 +388,7 @@ class Queuestat(object): json_output[port].update({"cached_time":cnstat_cached_dict.get('time')}) json_output.update(self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt, non_zero)) else: - print("Last cached time was " + 
str(cnstat_cached_dict.get('time'))) + print(f"Last cached time{self.namespace_str} was " + str(cnstat_cached_dict.get('time'))) self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt, non_zero) except IOError as e: print(e.errno, e) @@ -395,38 +413,33 @@ class Queuestat(object): else: print("Clear and update saved counters for " + port) -def main(): + +@click.command() +@click.option('-p', '--port', type=str, help='Show the queue conters for just one port', default=None) +@click.option('-c', '--clear', is_flag=True, default=False, help='Clear previous stats and save new ones') +@click.option('-d', '--delete', is_flag=True, default=False, help='Delete saved stats') +@click.option('-j', '--json_opt', is_flag=True, default=False, help='Print in JSON format') +@click.option('-V', '--voq', is_flag=True, default=False, help='display voq stats') +@click.option('-nz','--non_zero', is_flag=True, default=False, help='Display non-zero queue counters') +@click.option('-n', '--namespace', type=click.Choice(multi_asic.get_namespace_list()), help='Display queuecounters for a specific namespace name or skip for all', default=None) +@click.version_option(version='1.0') +def main(port, clear, delete, json_opt, voq, non_zero, namespace): + """ + Examples: + queuestat + queuestat -p Ethernet0 + queuestat -c + queuestat -d + queuestat -p Ethernet0 -n asic0 + """ + global cnstat_dir global cnstat_fqn_file - parser = argparse.ArgumentParser(description='Display the queue state and counters', - formatter_class=argparse.RawTextHelpFormatter, - epilog=""" -Examples: - queuestat - queuestat -p Ethernet0 - queuestat -c - queuestat -d -""") - - parser.add_argument('-p', '--port', type=str, help='Show the queue conters for just one port', default=None) - parser.add_argument('-c', '--clear', action='store_true', help='Clear previous stats and save new ones') - parser.add_argument('-d', '--delete', action='store_true', help='Delete saved stats') - parser.add_argument('-v', 
'--version', action='version', version='%(prog)s 1.0') - parser.add_argument('-j', '--json_opt', action='store_true', help='Print in JSON format') - parser.add_argument('-V', '--voq', action='store_true', help='display voq stats') - parser.add_argument('-n','--namespace', default=None, help='Display queue counters for specific namespace') - parser.add_argument('-nz','--non_zero', action='store_true', help='Display non-zero queue counters') - args = parser.parse_args() - - save_fresh_stats = args.clear - delete_stats = args.delete - voq = args.voq - json_opt = args.json_opt - namespace = args.namespace - non_zero = args.non_zero - - port_to_show_stats = args.port + save_fresh_stats = clear + delete_stats = delete + + port_to_show_stats = port cache = UserCache() @@ -436,16 +449,8 @@ Examples: if delete_stats: cache.remove() - queuestat = Queuestat( namespace, voq ) - - if save_fresh_stats: - queuestat.save_fresh_stats() - sys.exit(0) - - if port_to_show_stats!=None: - queuestat.get_print_port_stat(port_to_show_stats, json_opt, non_zero) - else: - queuestat.get_print_all_stat(json_opt, non_zero) + queuestat_wrapper = QueuestatWrapper(namespace, voq) + queuestat_wrapper.run(save_fresh_stats, port_to_show_stats, json_opt, non_zero) sys.exit(0) diff --git a/scripts/reboot b/scripts/reboot index b6f8ff96fb..044334af3e 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -41,7 +41,6 @@ REBOOT_SCRIPT_NAME=$(basename $0) REBOOT_TYPE="${REBOOT_SCRIPT_NAME}" TAG_LATEST=no REBOOT_FLAGS="" -FORCE_REBOOT="no" function debug() { @@ -179,7 +178,7 @@ function check_conflict_boot_in_fw_update() function parse_options() { - while getopts "h?v" opt; do + while getopts "h?vf" opt; do case ${opt} in h|\? 
) show_help_and_exit @@ -192,7 +191,6 @@ function parse_options() ;; f ) REBOOT_FLAGS+=" -f" - FORCE_REBOOT="yes" ;; esac done @@ -278,12 +276,9 @@ fi if [ -x ${DEVPATH}/${PLATFORM}/${PRE_REBOOT_HOOK} ]; then debug "Executing the pre-reboot script" ${DEVPATH}/${PLATFORM}/${PRE_REBOOT_HOOK} - EXIT_CODE=$? - if [[ ${EXIT_CODE} != ${EXIT_SUCCESS} ]]; then - if [[ "${FORCE_REBOOT}" != "yes" ]]; then - echo "Reboot is interrupted: use -f (force) to override" - exit ${EXIT_ERROR} - fi + EXIT_CODE="$?" + if [[ "${EXIT_CODE}" != "${EXIT_SUCCESS}" ]]; then + debug "WARNING: Failed to handle pre-reboot script: rc=${EXIT_CODE}" fi fi diff --git a/scripts/route_check.py b/scripts/route_check.py index 2fbe041547..a1abd3c352 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -328,16 +328,6 @@ def get_asicdb_routes(namespace): return (selector, subs, sorted(rt)) -def is_bgp_suppress_fib_pending_enabled(namespace): - """ - Retruns True if FIB suppression is enabled in BGP config, False otherwise - """ - show_run_cmd = ['show', 'runningconfiguration', 'bgp', '-n', namespace] - - output = subprocess.check_output(show_run_cmd, text=True) - return 'bgp suppress-fib-pending' in output - - def is_suppress_fib_pending_enabled(namespace): """ Returns True if FIB suppression is enabled, False otherwise @@ -791,20 +781,19 @@ def check_routes(namespace): results[namespace] = {} results[namespace]["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss - if is_bgp_suppress_fib_pending_enabled(namespace): - rt_frr_miss = check_frr_pending_routes(namespace) - - if rt_frr_miss: - if namespace not in results: - results[namespace] = {} - results[namespace]["missed_FRR_routes"] = rt_frr_miss + rt_frr_miss = check_frr_pending_routes(namespace) - if results: - if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: - print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} but all " - "routes in APPL_DB and ASIC_DB are in sync".format(namespace)) - if 
is_suppress_fib_pending_enabled(namespace): - mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl) + if rt_frr_miss: + if namespace not in results: + results[namespace] = {} + results[namespace]["missed_FRR_routes"] = rt_frr_miss + + if results: + if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: + print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} \ + but all routes in APPL_DB and ASIC_DB are in sync".format(namespace)) + if is_suppress_fib_pending_enabled(namespace): + mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl) if results: print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}") diff --git a/scripts/soft-reboot b/scripts/soft-reboot index 0b9030a6f7..74d7051b1d 100755 --- a/scripts/soft-reboot +++ b/scripts/soft-reboot @@ -93,7 +93,7 @@ function clear_lingering_reboot_config() if [[ -f ${WARM_DIR}/${REDIS_FILE} ]]; then mv -f ${WARM_DIR}/${REDIS_FILE} ${WARM_DIR}/${REDIS_FILE}.${TIMESTAMP} || /bin/true fi - /sbin/kexec -u || /bin/true + /sbin/kexec -u -a || /bin/true } SCRIPT=$0 @@ -147,9 +147,17 @@ function setup_reboot_variables() fi } +function invoke_kexec() { + /sbin/kexec -l "$KERNEL_IMAGE" --initrd="$INITRD" --append="$BOOT_OPTIONS" $@ +} + function load_kernel() { # Load kernel into the memory - /sbin/kexec -l "$KERNEL_IMAGE" --initrd="$INITRD" --append="$BOOT_OPTIONS" + invoke_kexec -a +} + +function load_kernel_secure() { + invoke_kexec -s } function reboot_pre_check() @@ -215,7 +223,14 @@ stop_sonic_services clear_lingering_reboot_config -load_kernel +# check if secure boot is enabled +CHECK_SECURE_UPGRADE_ENABLED=0 +SECURE_UPGRADE_ENABLED=$(mokutil --sb-state 2>/dev/null | grep -c "enabled") || CHECK_SECURE_UPGRADE_ENABLED=$? 
+if [[ $CHECK_SECURE_UPGRADE_ENABLED -ne 0 ]]; then + load_kernel +else + load_kernel_secure +fi # Update the reboot cause file to reflect that user issued 'reboot' command # Upon next boot, the contents of this file will be used to determine the diff --git a/scripts/watermarkstat b/scripts/watermarkstat index 99a46d5484..70ea853bc4 100755 --- a/scripts/watermarkstat +++ b/scripts/watermarkstat @@ -5,14 +5,15 @@ # watermarkstat is a tool for displaying watermarks. # ##################################################################### - -import argparse +import click import json import os import sys from natsort import natsorted from tabulate import tabulate +from sonic_py_common import multi_asic +import utilities_common.multi_asic as multi_asic_util # mock the redis for unit test purposes # try: @@ -23,6 +24,10 @@ try: sys.path.insert(0, tests_path) from mock_tables import dbconnector + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import tests.mock_tables.mock_multi_asic + dbconnector.load_namespace_config() + if os.environ["WATERMARKSTAT_UNIT_TESTING"] == "1": input_path = os.path.join(tests_path, "wm_input") mock_db_path = os.path.join(input_path, "mock_db") @@ -66,18 +71,33 @@ COUNTERS_PG_INDEX_MAP = "COUNTERS_PG_INDEX_MAP" COUNTERS_BUFFER_POOL_NAME_MAP = "COUNTERS_BUFFER_POOL_NAME_MAP" -class Watermarkstat(object): +class WatermarkstatWrapper(object): + """A wrapper to execute Watermarkstat over the correct namespaces""" + def __init__(self, namespace): + self.namespace = namespace - def __init__(self): - self.counters_db = SonicV2Connector(use_unix_socket_path=False) - self.counters_db.connect(self.counters_db.COUNTERS_DB) + # Initialize the multi_asic object + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.db = None + + @multi_asic_util.run_on_multi_asic + def run(self, clear, persistent, wm_type): + watermarkstat = Watermarkstat(self.db, self.multi_asic.current_namespace) + if clear: + 
watermarkstat.send_clear_notification(("PERSISTENT" if persistent else "USER", wm_type.upper())) + else: + table_prefix = PERSISTENT_TABLE_PREFIX if persistent else USER_TABLE_PREFIX + watermarkstat.print_all_stat(table_prefix, wm_type) - # connect APP DB for clear notifications - self.app_db = SonicV2Connector(use_unix_socket_path=False) - self.app_db.connect(self.counters_db.APPL_DB) + +class Watermarkstat(object): + + def __init__(self, db, namespace): + self.namespace = namespace + self.db = db def get_queue_type(table_id): - queue_type = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_TYPE_MAP, table_id) + queue_type = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_TYPE_MAP, table_id) if queue_type is None: print("Queue Type is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -92,7 +112,7 @@ class Watermarkstat(object): sys.exit(1) def get_queue_port(table_id): - port_table_id = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_PORT_MAP, table_id) + port_table_id = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_PORT_MAP, table_id) if port_table_id is None: print("Port is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -100,7 +120,7 @@ class Watermarkstat(object): return port_table_id def get_pg_port(table_id): - port_table_id = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_PORT_MAP, table_id) + port_table_id = self.db.get(self.db.COUNTERS_DB, COUNTERS_PG_PORT_MAP, table_id) if port_table_id is None: print("Port is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -108,7 +128,7 @@ class Watermarkstat(object): return port_table_id # Get all ports - self.counter_port_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + self.counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) if self.counter_port_name_map is None: print("COUNTERS_PORT_NAME_MAP is 
empty!", file=sys.stderr) sys.exit(1) @@ -127,7 +147,7 @@ class Watermarkstat(object): self.port_name_map[self.counter_port_name_map[port]] = port # Get Queues for each port - counter_queue_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_NAME_MAP) + counter_queue_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_QUEUE_NAME_MAP) if counter_queue_name_map is None: print("COUNTERS_QUEUE_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ -144,7 +164,7 @@ class Watermarkstat(object): self.port_all_queues_map[port][queue] = counter_queue_name_map[queue] # Get PGs for each port - counter_pg_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PG_NAME_MAP) + counter_pg_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PG_NAME_MAP) if counter_pg_name_map is None: print("COUNTERS_PG_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ -154,7 +174,7 @@ class Watermarkstat(object): self.port_pg_map[port][pg] = counter_pg_name_map[pg] # Get all buffer pools - self.buffer_pool_name_to_oid_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_BUFFER_POOL_NAME_MAP) + self.buffer_pool_name_to_oid_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_BUFFER_POOL_NAME_MAP) if self.buffer_pool_name_to_oid_map is None: print("COUNTERS_BUFFER_POOL_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ -194,7 +214,7 @@ class Watermarkstat(object): } def get_queue_index(self, table_id): - queue_index = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_INDEX_MAP, table_id) + queue_index = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_INDEX_MAP, table_id) if queue_index is None: print("Queue index is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -202,7 +222,7 @@ class Watermarkstat(object): return queue_index def get_pg_index(self, table_id): - pg_index = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_INDEX_MAP, table_id) + pg_index = 
self.db.get(self.db.COUNTERS_DB, COUNTERS_PG_INDEX_MAP, table_id) if pg_index is None: print("Priority group index is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -256,7 +276,7 @@ class Watermarkstat(object): full_table_id = table_prefix + obj_id idx = int(idx_func(obj_id)) pos = self.header_idx_to_pos[idx] - counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, full_table_id, watermark) + counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, watermark) if counter_data is None or counter_data == '': fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: @@ -274,7 +294,7 @@ class Watermarkstat(object): continue db_key = table_prefix + bp_oid - data = self.counters_db.get(self.counters_db.COUNTERS_DB, db_key, type["wm_name"]) + data = self.db.get(self.db.COUNTERS_DB, db_key, type["wm_name"]) if data is None: data = STATUS_NA table.append((buf_pool, data)) @@ -283,58 +303,52 @@ class Watermarkstat(object): # Get stat for each port for port in natsorted(self.counter_port_name_map): row_data = list() + data = self.get_counters(table_prefix, type["obj_map"][port], type["idx_func"], type["wm_name"]) row_data.append(port) row_data.extend(data) table.append(tuple(row_data)) - print(type["message"]) + namespace_str = f" (Namespace {self.namespace})" if multi_asic.is_multi_asic() else '' + print(type["message"] + namespace_str) print(tabulate(table, self.header_list, tablefmt='simple', stralign='right')) def send_clear_notification(self, data): msg = json.dumps(data, separators=(',', ':')) - self.app_db.publish('APPL_DB', 'WATERMARK_CLEAR_REQUEST', msg) + self.db.publish('APPL_DB', 'WATERMARK_CLEAR_REQUEST', msg) return - -def main(): - - parser = argparse.ArgumentParser(description='Display the watermark counters', - formatter_class=argparse.RawTextHelpFormatter, - epilog=""" -Examples: - watermarkstat -t pg_headroom - watermarkstat -t pg_shared - watermarkstat -t q_shared_all - watermarkstat -p -t q_shared_all - 
watermarkstat -t q_shared_all -c - watermarkstat -t q_shared_uni -c - watermarkstat -t q_shared_multi -c - watermarkstat -p -t pg_shared - watermarkstat -p -t q_shared_multi -c - watermarkstat -t buffer_pool - watermarkstat -t buffer_pool -c - watermarkstat -p -t buffer_pool -c -""") - - parser.add_argument('-c', '--clear', action='store_true', help='Clear watermarks request') - parser.add_argument('-p', '--persistent', action='store_true', help='Do the operations on the persistent watermark') - parser.add_argument('-t', '--type', required=True, action='store', - choices=['pg_headroom', 'pg_shared', 'q_shared_uni', 'q_shared_multi', 'buffer_pool', 'headroom_pool', 'q_shared_all'], - help='The type of watermark') - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - args = parser.parse_args() - watermarkstat = Watermarkstat() - - if args.clear: - watermarkstat.send_clear_notification(("PERSISTENT" if args.persistent else "USER", args.type.upper())) - sys.exit(0) - - table_prefix = PERSISTENT_TABLE_PREFIX if args.persistent else USER_TABLE_PREFIX - watermarkstat.print_all_stat(table_prefix, args.type) +@click.command() +@click.option('-c', '--clear', is_flag=True, help='Clear watermarks request') +@click.option('-p', '--persistent', is_flag=True, help='Do the operations on the persistent watermark') +@click.option('-t', '--type', 'wm_type', type=click.Choice(['pg_headroom', 'pg_shared', 'q_shared_uni', 'q_shared_multi', 'buffer_pool', 'headroom_pool', 'q_shared_all']), help='The type of watermark', required=True) +@click.option('-n', '--namespace', type=click.Choice(multi_asic.get_namespace_list()), help='Namespace name or skip for all', default=None) +@click.version_option(version='1.0') +def main(clear, persistent, wm_type, namespace): + """ + Display the watermark counters + + Examples: + watermarkstat -t pg_headroom + watermarkstat -t pg_shared + watermarkstat -t q_shared_all + watermarkstat -p -t q_shared_all + watermarkstat -t 
q_shared_all -c + watermarkstat -t q_shared_uni -c + watermarkstat -t q_shared_multi -c + watermarkstat -p -t pg_shared + watermarkstat -p -t q_shared_multi -c + watermarkstat -t buffer_pool + watermarkstat -t buffer_pool -c + watermarkstat -p -t buffer_pool -c + watermarkstat -t pg_headroom -n asic0 + watermarkstat -p -t buffer_pool -c -n asic1 + """ + + namespace_context = WatermarkstatWrapper(namespace) + namespace_context.run(clear, persistent, wm_type) sys.exit(0) - if __name__ == "__main__": main() diff --git a/setup.py b/setup.py index 6a66f012f9..dc5fa4a9b4 100644 --- a/setup.py +++ b/setup.py @@ -88,7 +88,6 @@ 'utilities_common', 'watchdogutil', 'sonic_cli_gen', - 'wol', ], package_data={ 'generic_config_updater': ['gcu_services_validator.conf.json', 'gcu_field_operation_validators.conf.json'], @@ -121,10 +120,12 @@ 'scripts/decode-syseeprom', 'scripts/dropcheck', 'scripts/disk_check.py', + 'scripts/dpu-tty.py', 'scripts/dropconfig', 'scripts/dropstat', 'scripts/dualtor_neighbor_check.py', 'scripts/dump_nat_entries.py', + 'scripts/debug_voq_chassis_packet_drops.sh', 'scripts/ecnconfig', 'scripts/fabricstat', 'scripts/fanshow', @@ -223,7 +224,6 @@ 'undebug = undebug.main:cli', 'watchdogutil = watchdogutil.main:watchdogutil', 'sonic-cli-gen = sonic_cli_gen.main:cli', - 'wol = wol.main:wol', ] }, install_requires=[ @@ -249,7 +249,7 @@ 'pexpect>=4.8.0', 'semantic-version>=2.8.5', 'prettyprinter>=0.18.0', - 'pyroute2>=0.5.14, <0.6.1', + 'pyroute2==0.7.12', 'requests>=2.25.0, <=2.31.0', 'tabulate==0.9.0', 'toposort==1.6', diff --git a/sfputil/main.py b/sfputil/main.py index 2c8f85d016..58c6855abe 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -18,7 +18,7 @@ import sonic_platform import sonic_platform_base.sonic_sfp.sfputilhelper from sonic_platform_base.sfp_base import SfpBase -from swsscommon.swsscommon import SonicV2Connector +from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector from natsort import natsorted from sonic_py_common import 
device_info, logger, multi_asic from utilities_common.sfp_helper import covert_application_advertisement_to_output_string @@ -1967,11 +1967,12 @@ def debug(): # 'loopback' subcommand @debug.command() -@click.argument('port_name', required=True, default=None) -@click.argument('loopback_mode', required=True, default="none", - type=click.Choice(["none", "host-side-input", "host-side-output", +@click.argument('port_name', required=True) +@click.argument('loopback_mode', required=True, + type=click.Choice(["host-side-input", "host-side-output", "media-side-input", "media-side-output"])) -def loopback(port_name, loopback_mode): +@click.argument('enable', required=True, type=click.Choice(["enable", "disable"])) +def loopback(port_name, loopback_mode, enable): """Set module diagnostic loopback mode """ physical_port = logical_port_to_physical_port_index(port_name) @@ -1991,17 +1992,82 @@ def loopback(port_name, loopback_mode): click.echo("{}: This functionality is not implemented".format(port_name)) sys.exit(ERROR_NOT_IMPLEMENTED) + namespace = multi_asic.get_namespace_for_port(port_name) + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + if config_db is not None: + config_db.connect() + try: + subport = int(config_db.get(config_db.CONFIG_DB, f'PORT|{port_name}', 'subport')) + except TypeError: + click.echo(f"{port_name}: subport is not present in CONFIG_DB") + sys.exit(EXIT_FAIL) + + # If subport is set to 0, assign a default value of 1 to ensure valid subport configuration + if subport == 0: + subport = 1 + else: + click.echo(f"{port_name}: Failed to connect to CONFIG_DB") + sys.exit(EXIT_FAIL) + + state_db = SonicV2Connector(use_unix_socket_path=False, namespace=namespace) + if state_db is not None: + state_db.connect(state_db.STATE_DB) + try: + host_lane_count = int(state_db.get(state_db.STATE_DB, + f'TRANSCEIVER_INFO|{port_name}', + 'host_lane_count')) + except TypeError: + click.echo(f"{port_name}: host_lane_count is not present in 
STATE_DB") + sys.exit(EXIT_FAIL) + + try: + media_lane_count = int(state_db.get(state_db.STATE_DB, + f'TRANSCEIVER_INFO|{port_name}', + 'media_lane_count')) + except TypeError: + click.echo(f"{port_name}: media_lane_count is not present in STATE_DB") + sys.exit(EXIT_FAIL) + else: + click.echo(f"{port_name}: Failed to connect to STATE_DB") + sys.exit(EXIT_FAIL) + + if 'host-side' in loopback_mode: + lane_mask = get_subport_lane_mask(subport, host_lane_count) + elif 'media-side' in loopback_mode: + lane_mask = get_subport_lane_mask(subport, media_lane_count) + else: + lane_mask = 0 + try: - status = api.set_loopback_mode(loopback_mode) + status = api.set_loopback_mode(loopback_mode, + lane_mask=lane_mask, + enable=enable == 'enable') except AttributeError: click.echo("{}: Set loopback mode is not applicable for this module".format(port_name)) sys.exit(ERROR_NOT_IMPLEMENTED) + except TypeError: + click.echo("{}: Set loopback mode failed. Parameter is not supported".format(port_name)) + sys.exit(EXIT_FAIL) if status: - click.echo("{}: Set {} loopback".format(port_name, loopback_mode)) + click.echo("{}: {} {} loopback".format(port_name, enable, loopback_mode)) else: - click.echo("{}: Set {} loopback failed".format(port_name, loopback_mode)) + click.echo("{}: {} {} loopback failed".format(port_name, enable, loopback_mode)) sys.exit(EXIT_FAIL) + +def get_subport_lane_mask(subport, lane_count): + """Get the lane mask for the given subport and lane count + + Args: + subport (int): Subport number + lane_count (int): Lane count for the subport + + Returns: + int: Lane mask for the given subport and lane count + """ + return ((1 << lane_count) - 1) << ((subport - 1) * lane_count) + + if __name__ == '__main__': cli() diff --git a/show/bgp_common.py b/show/bgp_common.py index b51e9f1879..e9c0e12e8a 100644 --- a/show/bgp_common.py +++ b/show/bgp_common.py @@ -3,7 +3,7 @@ import json import utilities_common.multi_asic as multi_asic_util -from sonic_py_common import multi_asic 
+from sonic_py_common import device_info, multi_asic from utilities_common import constants ''' @@ -60,10 +60,12 @@ def get_nexthop_info_str(nxhp_info, filterByIp): else: str_2_return = " via {},".format(nxhp_info['ip']) if "interfaceName" in nxhp_info: + intfs = nxhp_info['interfaceName'] if filterByIp: - str_2_return += ", via {}".format(nxhp_info['interfaceName']) + str_2_return += ", via {}".format(intfs) else: - str_2_return += " {},".format(nxhp_info['interfaceName']) + str_2_return += " {},".format(intfs) + elif "directlyConnected" in nxhp_info: str_2_return = " is directly connected," if "interfaceName" in nxhp_info: @@ -80,10 +82,13 @@ def get_nexthop_info_str(nxhp_info, filterByIp): str_2_return += "(vrf {}, {},".format(nxhp_info['vrf'], nxhp_info['interfaceName']) if "active" not in nxhp_info: str_2_return += " inactive" + if "recursive" in nxhp_info: + if device_info.is_voq_chassis(): + str_2_return = " " + str_2_return + " recursive via iBGP" + else: + str_2_return += " (recursive)" if "onLink" in nxhp_info: str_2_return += " onlink" - if "recursive" in nxhp_info: - str_2_return += " (recursive)" if "source" in nxhp_info: str_2_return += ", src {}".format(nxhp_info['source']) if "labels" in nxhp_info: @@ -220,6 +225,12 @@ def merge_to_combined_route(combined_route, route, new_info_l): if nh['interfaceName'] == combined_route[route][j]['nexthops'][y]['interfaceName']: found = True break + if device_info.is_voq_chassis(): + if nh['ip'] == combined_route[route][j]['nexthops'][y]['ip']: + if 'interfaceName' not in combined_route[route][j]['nexthops'][y]: + combined_route[route][j]['nexthops'][y] = nh + found = True + break elif "active" not in nh and "active" not in combined_route[route][j]['nexthops'][y]: if nh['ip'] == combined_route[route][j]['nexthops'][y]['ip']: found = True @@ -253,7 +264,7 @@ def process_route_info(route_info, device, filter_back_end, print_ns_str, asic_c while len(new_info['nexthops']): nh = new_info['nexthops'].pop() if 
filter_back_end and back_end_intf_set != None and "interfaceName" in nh: - if nh['interfaceName'] in back_end_intf_set: + if nh['interfaceName'] in back_end_intf_set or nh['interfaceName'].startswith('Ethernet-IB'): del_cnt += 1 else: new_nhop_l.append(copy.deepcopy(nh)) @@ -327,6 +338,7 @@ def show_routes(args, namespace, display, verbose, ipver): if display not in ['frontend', 'all']: print("dislay option '{}' is not a valid option.".format(display)) return + device = multi_asic_util.MultiAsic(display, namespace) arg_strg = "" found_json = 0 @@ -376,7 +388,6 @@ def show_routes(args, namespace, display, verbose, ipver): # Need to add "ns" to form bgpX so it is sent to the correct bgpX docker to handle the request cmd = "show {} route {}".format(ipver, arg_strg) output = bgp_util.run_bgp_show_command(cmd, ns) - # in case no output or something went wrong with user specified cmd argument(s) error it out # error from FRR always start with character "%" if output == "": diff --git a/show/interfaces/__init__.py b/show/interfaces/__init__.py index 9287eb5af7..f8889e6c32 100644 --- a/show/interfaces/__init__.py +++ b/show/interfaces/__init__.py @@ -18,6 +18,8 @@ HWSKU_JSON = 'hwsku.json' +REDIS_HOSTIP = "127.0.0.1" + # Read given JSON file def readJsonFile(fileName): try: @@ -646,6 +648,74 @@ def fec_stats(verbose, period, namespace, display): clicommon.run_command(cmd, display_cmd=verbose) + +def get_port_oid_mapping(): + ''' Returns dictionary of all ports interfaces and their OIDs. ''' + db = SonicV2Connector(host=REDIS_HOSTIP) + db.connect(db.COUNTERS_DB) + + port_oid_map = db.get_all(db.COUNTERS_DB, 'COUNTERS_PORT_NAME_MAP') + + db.close(db.COUNTERS_DB) + + return port_oid_map + + +def fetch_fec_histogram(port_oid_map, target_port): + ''' Fetch and display FEC histogram for the given port. 
''' + asic_db = SonicV2Connector(host=REDIS_HOSTIP) + asic_db.connect(asic_db.ASIC_DB) + + config_db = ConfigDBConnector() + config_db.connect() + + counter_db = SonicV2Connector(host=REDIS_HOSTIP) + counter_db.connect(counter_db.COUNTERS_DB) + + if target_port not in port_oid_map: + click.echo('Port {} not found in COUNTERS_PORT_NAME_MAP'.format(target_port), err=True) + raise click.Abort() + + port_oid = port_oid_map[target_port] + asic_db_kvp = counter_db.get_all(counter_db.COUNTERS_DB, 'COUNTERS:{}'.format(port_oid)) + + if asic_db_kvp is not None: + + fec_errors = {f'BIN{i}': asic_db_kvp.get + (f'SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S{i}', '0') for i in range(16)} + + # Prepare the data for tabulation + table_data = [(bin_label, error_value) for bin_label, error_value in fec_errors.items()] + + # Define headers + headers = ["Symbol Errors Per Codeword", "Codewords"] + + # Print FEC histogram using tabulate + click.echo(tabulate(table_data, headers=headers)) + else: + click.echo('No kvp found in ASIC DB for port {}, exiting'.format(target_port), err=True) + raise click.Abort() + + asic_db.close(asic_db.ASIC_DB) + config_db.close(config_db.CONFIG_DB) + counter_db.close(counter_db.COUNTERS_DB) + + +# 'fec-histogram' subcommand ("show interfaces counters fec-histogram") +@counters.command('fec-histogram') +@multi_asic_util.multi_asic_click_options +@click.argument('interfacename', required=True) +def fec_histogram(interfacename, namespace, display): + """Show interface counters fec-histogram""" + port_oid_map = get_port_oid_mapping() + + # Try to convert interface name from alias + interfacename = try_convert_interfacename_from_alias(click.get_current_context(), interfacename) + + # Fetch and display the FEC histogram + fetch_fec_histogram(port_oid_map, interfacename) + + # 'rates' subcommand ("show interfaces counters rates") @counters.command() @click.option('-p', '--period') diff --git a/show/main.py b/show/main.py index ca71a01732..3151e4d61b 100755 --- 
a/show/main.py +++ b/show/main.py @@ -72,6 +72,7 @@ PLATFORM_JSON = 'platform.json' HWSKU_JSON = 'hwsku.json' PORT_STR = "Ethernet" +BMP_STATE_DB = 'BMP_STATE_DB' VLAN_SUB_INTERFACE_SEPARATOR = '.' @@ -291,7 +292,6 @@ def cli(ctx): load_db_config() ctx.obj = Db() - # Add groups from other modules cli.add_command(acl.acl) cli.add_command(chassis_modules.chassis) @@ -648,7 +648,8 @@ def counters(namespace, display, verbose): @pfc.command() @click.argument('interface', type=click.STRING, required=False) -def priority(interface): +@multi_asic_util.multi_asic_click_option_namespace +def priority(interface, namespace): """Show pfc priority""" cmd = ['pfc', 'show', 'priority'] if interface is not None and clicommon.get_interface_naming_mode() == "alias": @@ -656,12 +657,15 @@ def priority(interface): if interface is not None: cmd += [str(interface)] + if namespace is not None: + cmd += ['-n', str(namespace)] run_command(cmd) @pfc.command() @click.argument('interface', type=click.STRING, required=False) -def asymmetric(interface): +@multi_asic_util.multi_asic_click_option_namespace +def asymmetric(interface, namespace): """Show asymmetric pfc""" cmd = ['pfc', 'show', 'asymmetric'] if interface is not None and clicommon.get_interface_naming_mode() == "alias": @@ -669,6 +673,8 @@ def asymmetric(interface): if interface is not None: cmd += [str(interface)] + if namespace is not None: + cmd += ['-n', str(namespace)] run_command(cmd) @@ -777,23 +783,53 @@ def watermark(): # 'unicast' subcommand ("show queue watermarks unicast") @watermark.command('unicast') -def wm_q_uni(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_q_uni(namespace): """Show user WM for unicast queues""" command = ['watermarkstat', '-t', 'q_shared_uni'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 
'multicast' subcommand ("show queue watermarks multicast") @watermark.command('multicast') -def wm_q_multi(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_q_multi(namespace): """Show user WM for multicast queues""" command = ['watermarkstat', '-t', 'q_shared_multi'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'all' subcommand ("show queue watermarks all") @watermark.command('all') -def wm_q_all(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_q_all(namespace): """Show user WM for all queues""" command = ['watermarkstat', '-t', 'q_shared_all'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # @@ -807,23 +843,53 @@ def persistent_watermark(): # 'unicast' subcommand ("show queue persistent-watermarks unicast") @persistent_watermark.command('unicast') -def pwm_q_uni(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_q_uni(namespace): """Show persistent WM for unicast queues""" command = ['watermarkstat', '-p', '-t', 'q_shared_uni'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'multicast' subcommand ("show queue persistent-watermarks multicast") @persistent_watermark.command('multicast') -def pwm_q_multi(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_q_multi(namespace): """Show persistent WM for 
multicast queues""" command = ['watermarkstat', '-p', '-t', 'q_shared_multi'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'all' subcommand ("show queue persistent-watermarks all") @persistent_watermark.command('all') -def pwm_q_all(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_q_all(namespace): """Show persistent WM for all queues""" command = ['watermarkstat', '-p', '-t', 'q_shared_all'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # @@ -840,15 +906,35 @@ def watermark(): pass @watermark.command('headroom') -def wm_pg_headroom(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_pg_headroom(namespace): """Show user headroom WM for pg""" command = ['watermarkstat', '-t', 'pg_headroom'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @watermark.command('shared') -def wm_pg_shared(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_pg_shared(namespace): """Show user shared WM for pg""" command = ['watermarkstat', '-t', 'pg_shared'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @priority_group.group() @@ -871,15 +957,36 @@ def persistent_watermark(): pass @persistent_watermark.command('headroom') -def pwm_pg_headroom(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def 
pwm_pg_headroom(namespace): """Show persistent headroom WM for pg""" command = ['watermarkstat', '-p', '-t', 'pg_headroom'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) + @persistent_watermark.command('shared') -def pwm_pg_shared(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_pg_shared(namespace): """Show persistent shared WM for pg""" command = ['watermarkstat', '-p', '-t', 'pg_shared'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @@ -892,15 +999,36 @@ def buffer_pool(): """Show details of the buffer pools""" @buffer_pool.command('watermark') -def wm_buffer_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_buffer_pool(namespace): """Show user WM for buffer pools""" - command = ['watermarkstat', '-t' ,'buffer_pool'] + command = ['watermarkstat', '-t', 'buffer_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) + @buffer_pool.command('persistent-watermark') -def pwm_buffer_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_buffer_pool(namespace): """Show persistent WM for buffer pools""" command = ['watermarkstat', '-p', '-t', 'buffer_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @@ -913,15 +1041,36 @@ def headroom_pool(): """Show details of headroom pool""" @headroom_pool.command('watermark') -def wm_headroom_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + 
show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_headroom_pool(namespace): """Show user WM for headroom pool""" command = ['watermarkstat', '-t', 'headroom_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) + @headroom_pool.command('persistent-watermark') -def pwm_headroom_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_headroom_pool(namespace): """Show persistent WM for headroom pool""" command = ['watermarkstat', '-p', '-t', 'headroom_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @@ -1539,7 +1688,7 @@ def ntp(verbose): """Show NTP running configuration""" ntp_servers = [] ntp_dict = {} - with open("/etc/ntp.conf") as ntp_file: + with open("/etc/ntpsec/ntp.conf") as ntp_file: data = ntp_file.readlines() for line in data: if line.startswith("server "): @@ -2008,10 +2157,13 @@ def policer(policer_name, verbose): # 'ecn' command ("show ecn") # @cli.command('ecn') +@multi_asic_util.multi_asic_click_option_namespace @click.option('--verbose', is_flag=True, help="Enable verbose output") -def ecn(verbose): +def ecn(namespace, verbose): """Show ECN configuration""" cmd = ['ecnconfig', '-l'] + if namespace is not None: + cmd += ['-n', str(namespace)] run_command(cmd, display_cmd=verbose) @@ -2030,9 +2182,22 @@ def boot(): # 'mmu' command ("show mmu") # @cli.command('mmu') -def mmu(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +@click.option('--verbose', '-vv', is_flag=True, help="Enable verbose output") +def mmu(namespace, verbose): """Show mmu configuration""" cmd = ['mmuconfig', 
'-l'] + if namespace is not None: + cmd += ['-n', str(namespace)] + if verbose: + cmd += ['-vv'] run_command(cmd) # @@ -2046,10 +2211,25 @@ def buffer(): # # 'configuration' command ("show buffer command") # + + @buffer.command() -def configuration(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +@click.option('--verbose', '-vv', is_flag=True, help="Enable verbose output") +def configuration(namespace, verbose): """show buffer configuration""" cmd = ['mmuconfig', '-l'] + if namespace is not None: + cmd += ['-n', str(namespace)] + if verbose: + cmd += ['-vv'] run_command(cmd) # @@ -2092,6 +2272,138 @@ def ztp(status, verbose): run_command(cmd, display_cmd=verbose) +# +# 'bmp' group ("show bmp ...") +# +@cli.group(cls=clicommon.AliasedGroup) +def bmp(): + """Show details of the bmp dataset""" + pass + + +# 'bgp-neighbor-table' subcommand ("show bmp bgp-neighbor-table") +@bmp.command('bgp-neighbor-table') +@clicommon.pass_db +def bmp_neighbor_table(db): + """Show bmp bgp-neighbor-table information""" + bmp_headers = ["Neighbor_Address", "Peer_Address", "Peer_ASN", "Peer_RD", "Peer_Port", + "Local_Address", "Local_ASN", "Local_Port", "Advertised_Capabilities", "Received_Capabilities"] + + # BGP_NEIGHBOR_TABLE|10.0.1.2 + bmp_keys = db.db.keys(BMP_STATE_DB, "BGP_NEIGHBOR_TABLE|*") + + click.echo("Total number of bmp neighbors: {}".format(0 if bmp_keys is None else len(bmp_keys))) + + bmp_body = [] + if bmp_keys is not None: + for key in bmp_keys: + values = db.db.get_all(BMP_STATE_DB, key) + bmp_body.append([ + values["peer_addr"], # Neighbor_Address + values["peer_addr"], + values["peer_asn"], + values["peer_rd"], + values["peer_port"], + values["local_addr"], + values["local_asn"], + values["local_port"], + values["sent_cap"], + values["recv_cap"] + ]) + + click.echo(tabulate(bmp_body, bmp_headers)) + + +# 
'bmp-rib-out-table' subcommand ("show bmp bgp-rib-out-table") +@bmp.command('bgp-rib-out-table') +@clicommon.pass_db +def bmp_rib_out_table(db): + """Show bmp bgp-rib-out-table information""" + bmp_headers = ["Neighbor_Address", "NLRI", "Origin", "AS_Path", "Origin_AS", "Next_Hop", "Local_Pref", + "Originator_ID", "Community_List", "Ext_Community_List"] + + # BGP_RIB_OUT_TABLE|192.181.168.0/25|10.0.0.59 + bmp_keys = db.db.keys(BMP_STATE_DB, "BGP_RIB_OUT_TABLE|*") + delimiter = db.db.get_db_separator(BMP_STATE_DB) + + click.echo("Total number of bmp bgp-rib-out-table: {}".format(0 if bmp_keys is None else len(bmp_keys))) + + bmp_body = [] + if bmp_keys is not None: + for key in bmp_keys: + key_values = key.split(delimiter) + if len(key_values) < 3: + continue + values = db.db.get_all(BMP_STATE_DB, key) + bmp_body.append([ + key_values[2], # Neighbor_Address + key_values[1], # NLRI + values["origin"], + values["as_path"], + values["origin_as"], + values["next_hop"], + values["local_pref"], + values["originator_id"], + values["community_list"], + values["ext_community_list"] + ]) + + click.echo(tabulate(bmp_body, bmp_headers)) + + +# 'bgp-rib-in-table' subcommand ("show bmp bgp-rib-in-table") +@bmp.command('bgp-rib-in-table') +@clicommon.pass_db +def bmp_rib_in_table(db): + """Show bmp bgp-rib-in-table information""" + bmp_headers = ["Neighbor_Address", "NLRI", "Origin", "AS_Path", "Origin_AS", "Next_Hop", "Local_Pref", + "Originator_ID", "Community_List", "Ext_Community_List"] + + # BGP_RIB_IN_TABLE|20c0:ef50::/64|10.0.0.57 + bmp_keys = db.db.keys(BMP_STATE_DB, "BGP_RIB_IN_TABLE|*") + delimiter = db.db.get_db_separator(BMP_STATE_DB) + + click.echo("Total number of bmp bgp-rib-in-table: {}".format(0 if bmp_keys is None else len(bmp_keys))) + + bmp_body = [] + if bmp_keys is not None: + for key in bmp_keys: + key_values = key.split(delimiter) + if len(key_values) < 3: + continue + values = db.db.get_all(BMP_STATE_DB, key) + bmp_body.append([ + key_values[2], # 
Neighbor_Address + key_values[1], # NLRI + values["origin"], + values["as_path"], + values["origin_as"], + values["next_hop"], + values["local_pref"], + values["originator_id"], + values["community_list"], + values["ext_community_list"] + ]) + + click.echo(tabulate(bmp_body, bmp_headers)) + + +# 'tables' subcommand ("show bmp tables") +@bmp.command('tables') +@clicommon.pass_db +def tables(db): + """Show bmp table status information""" + bmp_headers = ["Table_Name", "Enabled"] + bmp_body = [] + click.echo("BMP tables: ") + bmp_keys = db.cfgdb.get_table('BMP') + if bmp_keys['table']: + bmp_body.append(['bgp_neighbor_table', bmp_keys['table']['bgp_neighbor_table']]) + bmp_body.append(['bgp_rib_in_table', bmp_keys['table']['bgp_rib_in_table']]) + bmp_body.append(['bgp_rib_out_table', bmp_keys['table']['bgp_rib_out_table']]) + click.echo(tabulate(bmp_body, bmp_headers)) + + # # 'bfd' group ("show bfd ...") # @@ -2156,6 +2468,17 @@ def peer(db, peer_ip): click.echo(tabulate(bfd_body, bfd_headers)) +# 'suppress-fib-pending' subcommand ("show suppress-fib-pending") +@cli.command('suppress-fib-pending') +@clicommon.pass_db +def suppress_pending_fib(db): + """ Show the status of suppress pending FIB feature """ + + field_values = db.cfgdb.get_entry('DEVICE_METADATA', 'localhost') + state = field_values.get('suppress-fib-pending', 'disabled').title() + click.echo(state) + + # asic-sdk-health-event subcommand ("show asic-sdk-health-event") @cli.group(cls=clicommon.AliasedGroup) def asic_sdk_health_event(): @@ -2253,6 +2576,46 @@ def received(db, namespace): ctx.fail("ASIC/SDK health event is not supported on the platform") +# +# 'serial_console' command group ("show serial_console ...") +# +@cli.group('serial_console', invoke_without_command=True) +@clicommon.pass_db +def serial_console(db): + """Show serial_console configuration""" + + serial_console_table = db.cfgdb.get_entry('SERIAL_CONSOLE', 'POLICIES') + + hdrs = ['inactivity-timeout', 'sysrq-capabilities'] + data = [] + 
+ data.append(serial_console_table.get('inactivity_timeout', '900 ')) + data.append(serial_console_table.get('sysrq_capabilities', 'disabled ')) + + configuration = [data] + click.echo(tabulate(configuration, headers=hdrs, tablefmt='simple', missingval='')) + + +# +# 'ssh' command group ("show ssh ...") +# +@cli.group('ssh', invoke_without_command=True) +@clicommon.pass_db +def ssh(db): + """Show ssh configuration""" + + serial_console_table = db.cfgdb.get_entry('SSH_SERVER', 'POLICIES') + + hdrs = ['inactivity-timeout', 'max-sessions'] + data = [] + + data.append(serial_console_table.get('inactivity_timeout', '900 ')) + data.append(serial_console_table.get('max_session', '0 ')) + + configuration = [data] + click.echo(tabulate(configuration, headers=hdrs, tablefmt='simple', missingval='')) + + # # 'banner' command group ("show banner ...") # diff --git a/sonic-utilities-data/templates/timer.unit.j2 b/sonic-utilities-data/templates/timer.unit.j2 deleted file mode 100644 index 09989f2c51..0000000000 --- a/sonic-utilities-data/templates/timer.unit.j2 +++ /dev/null @@ -1,19 +0,0 @@ -# -# =============== Managed by SONiC Package Manager. DO NOT EDIT! 
=============== -# auto-generated from {{ source }} by sonic-package-manager -# -[Unit] -Description=Delays {{ manifest.service.name }} until SONiC has started -PartOf={{ manifest.service.name }}{% if multi_instance %}@%i{% endif %}.service - -[Timer] -OnUnitActiveSec=0 sec -OnBootSec=3min 30 sec -Unit={{ manifest.service.name }}{% if multi_instance %}@%i{% endif %}.service - -[Install] -WantedBy=timers.target sonic.target sonic-delayed.target -{%- for service in manifest.service["wanted-by"] %} -WantedBy={{ service }}{% if multi_instance and service in multi_instance_services %}@%i{% endif %}.service -{%- endfor %} - diff --git a/sonic_installer/bootloader/aboot.py b/sonic_installer/bootloader/aboot.py index ac327feb4c..d6492171ab 100644 --- a/sonic_installer/bootloader/aboot.py +++ b/sonic_installer/bootloader/aboot.py @@ -71,6 +71,8 @@ class AbootBootloader(Bootloader): def _boot_config_read(self, path=BOOT_CONFIG_PATH): config = collections.OrderedDict() + if not os.path.exists(path): + return config with open(path) as f: for line in f.readlines(): line = line.strip() @@ -112,7 +114,10 @@ def get_installed_images(self): def get_next_image(self): config = self._boot_config_read() - match = re.search(r"flash:/*(\S+)/", config['SWI']) + swi = config.get('SWI', '') + match = re.search(r"flash:/*(\S+)/", swi) + if not match: + return swi.split(':', 1)[-1] return match.group(1).replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX, 1) def set_default_image(self, image): diff --git a/sonic_installer/bootloader/grub.py b/sonic_installer/bootloader/grub.py index d76ddcc0c7..029ebf34f1 100644 --- a/sonic_installer/bootloader/grub.py +++ b/sonic_installer/bootloader/grub.py @@ -164,7 +164,7 @@ def is_secure_upgrade_image_verification_supported(self): if ! 
[ -n "$(ls -A /sys/firmware/efi/efivars 2>/dev/null)" ]; then mount -t efivarfs none /sys/firmware/efi/efivars 2>/dev/null fi - SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled") + SECURE_UPGRADE_ENABLED=$(mokutil --sb-state 2>/dev/null | grep -c "enabled") else echo "efi not supported - exiting without verification" exit 1 diff --git a/sonic_package_manager/service_creator/creator.py b/sonic_package_manager/service_creator/creator.py index 57f8ac4624..c88e96a44a 100644 --- a/sonic_package_manager/service_creator/creator.py +++ b/sonic_package_manager/service_creator/creator.py @@ -31,7 +31,6 @@ SERVICE_FILE_TEMPLATE = 'sonic.service.j2' -TIMER_UNIT_TEMPLATE = 'timer.unit.j2' SYSTEMD_LOCATION = '/usr/lib/systemd/system' ETC_SYSTEMD_LOCATION = '/etc/systemd/system' @@ -305,7 +304,7 @@ def generate_service_mgmt(self, package: Package): log.info(f'generated {script_path}') def generate_systemd_service(self, package: Package): - """ Generates systemd service(s) file and timer(s) (if needed) for package. + """ Generates systemd service(s) file for package. Args: package: Package object to generate service for. 
@@ -333,23 +332,6 @@ def generate_systemd_service(self, package: Package): render_template(template, output_file, template_vars) log.info(f'generated {output_file}') - if package.manifest['service']['delayed']: - template_vars = { - 'source': get_tmpl_path(TIMER_UNIT_TEMPLATE), - 'manifest': package.manifest.unmarshal(), - 'multi_instance': False, - } - output_file = os.path.join(SYSTEMD_LOCATION, f'{name}.timer') - template = os.path.join(TEMPLATES_PATH, TIMER_UNIT_TEMPLATE) - render_template(template, output_file, template_vars) - log.info(f'generated {output_file}') - - if package.manifest['service']['asic-service']: - output_file = os.path.join(SYSTEMD_LOCATION, f'{name}@.timer') - template_vars['multi_instance'] = True - render_template(template, output_file, template_vars) - log.info(f'generated {output_file}') - def update_generated_services_conf_file(self, package: Package, remove=False): """ Updates generated_services.conf file. diff --git a/sonic_package_manager/service_creator/feature.py b/sonic_package_manager/service_creator/feature.py index 43b6c309fe..32a155206c 100644 --- a/sonic_package_manager/service_creator/feature.py +++ b/sonic_package_manager/service_creator/feature.py @@ -105,8 +105,7 @@ def update(self, old_manifest: Manifest, new_manifest: Manifest): """ Migrate feature configuration. It can be that non-configurable - feature entries have to be updated. e.g: "delayed" for example if - the new feature introduces a service timer or name of the service has + feature entries have to be updated. e.g: name of the service has changed, but user configurable entries are not changed). 
Args: diff --git a/tests/chassis_modules_test.py b/tests/chassis_modules_test.py index 681e3d2c13..f59341a487 100755 --- a/tests/chassis_modules_test.py +++ b/tests/chassis_modules_test.py @@ -126,7 +126,13 @@ def mock_run_command_side_effect(*args, **kwargs): - return '', 0 + print("command: {}".format(*args)) + if isinstance(*args, list): + return '', 0 + else: + print("Expected type of command is list. Actual type is {}".format(*args)) + assert 0 + return '', 0 class TestChassisModules(object): diff --git a/tests/cli_sessions_test.py b/tests/cli_sessions_test.py new file mode 100644 index 0000000000..755b232708 --- /dev/null +++ b/tests/cli_sessions_test.py @@ -0,0 +1,32 @@ +from click.testing import CliRunner + +import config.main as config +import show.main as show +from utilities_common.db import Db + + +class TestCliSessionsCommands: + def test_config_command(self): + runner = CliRunner() + + db = Db() + + result = runner.invoke(config.config.commands['serial_console'].commands['sysrq-capabilities'], + ['enabled'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(config.config.commands['serial_console'].commands['inactivity-timeout'], + ['180'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands['serial_console'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(config.config.commands['ssh'].commands['inactivity-timeout'], ['190'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(config.config.commands['ssh'].commands['max-sessions'], ['60'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands['ssh'], obj=db) + assert result.exit_code == 0 diff --git a/tests/config_override_input/golden_input_yang_failure.json b/tests/config_override_input/golden_input_yang_failure.json deleted file mode 100644 index 4b533e1598..0000000000 --- a/tests/config_override_input/golden_input_yang_failure.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "running_config": { - 
"ACL_TABLE": { - "DATAACL": { - "policy_desc": "DATAACL", - "ports": [ - "Ethernet4" - ], - "stage": "ingress", - "type": "L3" - }, - "NTP_ACL": { - "policy_desc": "NTP_ACL", - "services": [ - "NTP" - ], - "stage": "ingress", - "type": "CTRLPLANE" - } - }, - "AUTO_TECHSUPPORT_FEATURE": { - "bgp": { - "rate_limit_interval": "600", - "state": "enabled" - }, - "database": { - "rate_limit_interval": "600", - "state": "enabled" - } - }, - "PORT": { - "Ethernet4": { - "admin_status": "up", - "alias": "fortyGigE0/4", - "description": "Servers0:eth0", - "index": "1", - "lanes": "29,30,31,32", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000", - "tpid": "0x8100" - }, - "Ethernet8": { - "admin_status": "up", - "alias": "fortyGigE0/8", - "description": "Servers1:eth0", - "index": "2", - "lanes": "33,34,35,36", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000", - "tpid": "0x8100" - } - } - }, - "golden_config": { - "ACL_TABLE": { - "EVERFLOWV6": { - "policy_desc": "EVERFLOWV6", - "ports": [ - "Ethernet0" - ], - "stage": "ingress", - "type": "MIRRORV6" - } - }, - "AUTO_TECHSUPPORT_FEATURE": { - "bgp": { - "state": "disabled" - }, - "database": { - "state": "disabled" - } - }, - "PORT": { - "Ethernet12": { - "admin_status": "up", - "alias": "fortyGigE0/12", - "description": "Servers2:eth0", - "index": "3", - "lanes": "37,38,39,40", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000", - "tpid": "0x8100" - } - } - } -} diff --git a/tests/config_override_input/partial_config_override.json b/tests/config_override_input/partial_config_override.json index 2021ea282b..f28a8ed7ae 100644 --- a/tests/config_override_input/partial_config_override.json +++ b/tests/config_override_input/partial_config_override.json @@ -71,6 +71,30 @@ "stage": "ingress", "type": "CTRLPLANE" } + }, + "PORT": { + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + 
"speed": "40000", + "tpid": "0x8100" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + } } }, "expected_config": { diff --git a/tests/config_override_test.py b/tests/config_override_test.py index a46be5ef60..5137585832 100644 --- a/tests/config_override_test.py +++ b/tests/config_override_test.py @@ -20,7 +20,6 @@ EMPTY_TABLE_REMOVAL = os.path.join(DATA_DIR, "empty_table_removal.json") AAA_YANG_HARD_CHECK = os.path.join(DATA_DIR, "aaa_yang_hard_check.json") RUNNING_CONFIG_YANG_FAILURE = os.path.join(DATA_DIR, "running_config_yang_failure.json") -GOLDEN_INPUT_YANG_FAILURE = os.path.join(DATA_DIR, "golden_input_yang_failure.json") FINAL_CONFIG_YANG_FAILURE = os.path.join(DATA_DIR, "final_config_yang_failure.json") MULTI_ASIC_MACSEC_OV = os.path.join(DATA_DIR, "multi_asic_macsec_ov.json") MULTI_ASIC_FEATURE_RM = os.path.join(DATA_DIR, "multi_asic_feature_rm.json") @@ -179,7 +178,7 @@ def read_json_file_side_effect(filename): ['golden_config_db.json'], obj=db) assert result.exit_code != 0 - assert "Authentication with 'tacacs+' is not allowed when passkey not exits." in result.output + assert "Authentication with 'tacacs+' is not allowed when passkey not exists." 
in result.output def check_override_config_table(self, db, config, running_config, golden_config, expected_config): @@ -233,17 +232,6 @@ def is_yang_config_validation_enabled_side_effect(filename): self.check_yang_verification_failure( db, config, read_data['running_config'], read_data['golden_config'], "running config") - def test_golden_input_yang_failure(self): - def is_yang_config_validation_enabled_side_effect(filename): - return True - db = Db() - with open(GOLDEN_INPUT_YANG_FAILURE, "r") as f: - read_data = json.load(f) - with mock.patch('config.main.device_info.is_yang_config_validation_enabled', - mock.MagicMock(side_effect=is_yang_config_validation_enabled_side_effect)): - self.check_yang_verification_failure( - db, config, read_data['running_config'], read_data['golden_config'], "config_input") - def test_final_config_yang_failure(self): def is_yang_config_validation_enabled_side_effect(filename): return True diff --git a/tests/config_test.py b/tests/config_test.py index ba7c8b526c..e9619da592 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -18,6 +18,7 @@ from click.testing import CliRunner from sonic_py_common import device_info, multi_asic +from utilities_common import flock from utilities_common.db import Db from utilities_common.general import load_module_from_source from mock import call, patch, mock_open, MagicMock @@ -45,6 +46,23 @@ load_minigraph_platform_false_path = os.path.join(load_minigraph_input_path, "platform_false") load_minigraph_command_output="""\ +Acquired lock on {0} +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -H -m --write-to-db +Running command: config qos reload --no-dynamic-buffer --no-delay +Running command: pfcwd start_default +Restarting SONiC target ... +Reloading Monit configuration ... +Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`. 
+Released lock on {0} +""" + +load_minigraph_lock_failure_output = """\ +Failed to acquire lock on {0} +""" + +load_minigraph_command_bypass_lock_output = """\ +Bypass lock on {} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -H -m --write-to-db Running command: config qos reload --no-dynamic-buffer --no-delay @@ -55,6 +73,7 @@ """ load_minigraph_platform_plugin_command_output="""\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -H -m --write-to-db Running command: config qos reload --no-dynamic-buffer --no-delay @@ -63,6 +82,7 @@ Restarting SONiC target ... Reloading Monit configuration ... Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`. +Released lock on {0} """ load_mgmt_config_command_ipv4_only_output="""\ @@ -137,6 +157,20 @@ """ RELOAD_CONFIG_DB_OUTPUT = """\ +Acquired lock on {0} +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db +Restarting SONiC target ... +Reloading Monit configuration ... +Released lock on {0} +""" + +RELOAD_CONFIG_DB_LOCK_FAILURE_OUTPUT = """\ +Failed to acquire lock on {0} +""" + +RELOAD_CONFIG_DB_BYPASS_LOCK_OUTPUT = """\ +Bypass lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db Restarting SONiC target ... @@ -144,44 +178,55 @@ """ RELOAD_YANG_CFG_OUTPUT = """\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -Y /tmp/config.json --write-to-db Restarting SONiC target ... Reloading Monit configuration ... +Released lock on {0} """ RELOAD_MASIC_CONFIG_DB_OUTPUT = """\ +Acquired lock on {0} Stopping SONiC target ... 
Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json -n asic0 --write-to-db Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json -n asic1 --write-to-db Restarting SONiC target ... Reloading Monit configuration ... +Released lock on {0} """ reload_config_with_sys_info_command_output="""\ +Acquired lock on {0} Running command: /usr/local/bin/sonic-cfggen -H -k Seastone-DX010-25-50 --write-to-db""" reload_config_with_disabled_service_output="""\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db Restarting SONiC target ... Reloading Monit configuration ... +Released lock on {0} """ reload_config_masic_onefile_output = """\ +Acquired lock on {0} Stopping SONiC target ... Restarting SONiC target ... Reloading Monit configuration ... +Released lock on {0} """ reload_config_masic_onefile_gen_sysinfo_output = """\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -H -k Mellanox-SN3800-D112C8 --write-to-db Running command: /usr/local/bin/sonic-cfggen -H -k multi_asic -n asic0 --write-to-db Running command: /usr/local/bin/sonic-cfggen -H -k multi_asic -n asic1 --write-to-db Restarting SONiC target ... Reloading Monit configuration ... 
+Released lock on {0} """ save_config_output = """\ @@ -601,7 +646,8 @@ def test_config_reload(self, get_cmd_module, setup_single_broadcom_asic): assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')][:1]) == reload_config_with_sys_info_command_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')][:2]) == \ + reload_config_with_sys_info_command_output.format(config.SYSTEM_RELOAD_LOCK) def test_config_reload_stdin(self, get_cmd_module, setup_single_broadcom_asic): def mock_json_load(f): @@ -641,7 +687,8 @@ def mock_json_load(f): assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')][:1]) == reload_config_with_sys_info_command_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')][:2]) == \ + reload_config_with_sys_info_command_output.format(config.SYSTEM_RELOAD_LOCK) @classmethod def teardown_class(cls): @@ -747,7 +794,8 @@ def read_json_file_side_effect(filename): traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 - assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == reload_config_masic_onefile_output + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == \ + reload_config_masic_onefile_output.format(config.SYSTEM_RELOAD_LOCK) def test_config_reload_onefile_gen_sysinfo_masic(self): def read_json_file_side_effect(filename): @@ -823,7 +871,7 @@ def read_json_file_side_effect(filename): assert result.exit_code == 0 assert "\n".join( [li.rstrip() for li in result.output.split('\n')] - ) == reload_config_masic_onefile_gen_sysinfo_output + ) == reload_config_masic_onefile_gen_sysinfo_output.format(config.SYSTEM_RELOAD_LOCK) def test_config_reload_onefile_bad_format_masic(self): def read_json_file_side_effect(filename): @@ -878,11 +926,58 @@ def test_load_minigraph(self, get_cmd_module, setup_single_broadcom_asic): print(result.output) traceback.print_tb(result.exc_info[2]) assert 
result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_command_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) == \ + (load_minigraph_command_output.format(config.SYSTEM_RELOAD_LOCK)) # Verify "systemctl reset-failed" is called for services under sonic.target mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss']) assert mock_run_command.call_count == 12 + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', + mock.MagicMock(return_value=("dummy_path", None))) + def test_load_minigraph_lock_failure(self, get_cmd_module, setup_single_broadcom_asic): + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, _) = get_cmd_module + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + try: + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-y"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert result.output == \ + (load_minigraph_lock_failure_output.format(config.SYSTEM_RELOAD_LOCK)) + assert mock_run_command.call_count == 0 + finally: + flock.release_flock(fd) + + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', + mock.MagicMock(return_value=("dummy_path", None))) + def test_load_minigraph_bypass_lock(self, get_cmd_module, setup_single_broadcom_asic): + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, _) = get_cmd_module + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + try: + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-y", "-b"]) + print(result.exit_code) + print(result.output) + 
traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert result.output == \ + load_minigraph_command_bypass_lock_output.format(config.SYSTEM_RELOAD_LOCK) + assert mock_run_command.call_count == 12 + finally: + flock.release_flock(fd) + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=(load_minigraph_platform_path, None))) def test_load_minigraph_platform_plugin(self, get_cmd_module, setup_single_broadcom_asic): with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: @@ -893,7 +988,8 @@ def test_load_minigraph_platform_plugin(self, get_cmd_module, setup_single_broad print(result.output) traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_platform_plugin_command_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) == \ + (load_minigraph_platform_plugin_command_output.format(config.SYSTEM_RELOAD_LOCK)) # Verify "systemctl reset-failed" is called for services under sonic.target mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss']) assert mock_run_command.call_count == 12 @@ -1032,7 +1128,6 @@ def read_json_file_side_effect(filename): }, "TACPLUS": { "global": { - "passkey": "" } } } @@ -1044,7 +1139,7 @@ def read_json_file_side_effect(filename): runner = CliRunner() result = runner.invoke(config.config.commands["load_minigraph"], ["--override_config", "-y"]) assert result.exit_code != 0 - assert "Authentication with 'tacacs+' is not allowed when passkey not exits." in result.output + assert "Authentication with 'tacacs+' is not allowed when passkey not exists." 
in result.output @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=("dummy_path", None))) def test_load_minigraph_with_traffic_shift_away(self, get_cmd_module): @@ -1171,7 +1266,59 @@ def test_reload_config(self, get_cmd_module, setup_single_broadcom_asic): traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ - == RELOAD_CONFIG_DB_OUTPUT + == RELOAD_CONFIG_DB_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) + + def test_reload_config_lock_failure(self, get_cmd_module, setup_single_broadcom_asic): + self.add_sysinfo_to_cfg_file() + with mock.patch( + "utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect) + ): + (config, show) = get_cmd_module + runner = CliRunner() + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + try: + result = runner.invoke( + config.config.commands["reload"], + [self.dummy_cfg_file, '-y', '-f']) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) \ + == RELOAD_CONFIG_DB_LOCK_FAILURE_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) + finally: + flock.release_flock(fd) + + def test_reload_config_bypass_lock(self, get_cmd_module, setup_single_broadcom_asic): + self.add_sysinfo_to_cfg_file() + with mock.patch( + "utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect) + ): + (config, show) = get_cmd_module + runner = CliRunner() + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + try: + result = runner.invoke( + config.config.commands["reload"], + [self.dummy_cfg_file, '-y', '-f', '-b']) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert 
"\n".join([line.rstrip() for line in result.output.split('\n')]) \ + == RELOAD_CONFIG_DB_BYPASS_LOCK_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) + finally: + flock.release_flock(fd) def test_config_reload_disabled_service(self, get_cmd_module, setup_single_broadcom_asic): self.add_sysinfo_to_cfg_file() @@ -1191,7 +1338,8 @@ def test_config_reload_disabled_service(self, get_cmd_module, setup_single_broad assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == reload_config_with_disabled_service_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) == \ + reload_config_with_disabled_service_output.format(config.SYSTEM_RELOAD_LOCK) def test_reload_config_masic(self, get_cmd_module, setup_multi_broadcom_masic): self.add_sysinfo_to_cfg_file() @@ -1215,7 +1363,7 @@ def test_reload_config_masic(self, get_cmd_module, setup_multi_broadcom_masic): traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ - == RELOAD_MASIC_CONFIG_DB_OUTPUT + == RELOAD_MASIC_CONFIG_DB_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) def test_reload_yang_config(self, get_cmd_module, setup_single_broadcom_asic): @@ -1234,7 +1382,7 @@ def test_reload_yang_config(self, get_cmd_module, traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ - == RELOAD_YANG_CFG_OUTPUT + == RELOAD_YANG_CFG_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) @classmethod def teardown_class(cls): diff --git a/tests/conftest.py b/tests/conftest.py index 5dd31d523a..3874668a67 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -194,7 +194,11 @@ def setup_single_bgp_instance(request): elif request.param == 'ipv6_route': bgp_mocked_json = 'ipv6_route.json' elif request.param == 'ip_special_route': - bgp_mocked_json = 'ip_special_route.json' + bgp_mocked_json = 'ip_special_route.json' + elif request.param == 
'ip_route_lc': + bgp_mocked_json = 'ip_route_lc.json' + elif request.param == 'ip_route_remote_lc': + bgp_mocked_json = 'ip_route_remote_lc.json' else: bgp_mocked_json = os.path.join( test_path, 'mock_tables', 'dummy.json') @@ -240,7 +244,8 @@ def mock_run_bgp_route_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constan _old_run_bgp_command = bgp_util.run_bgp_command if any([request.param == 'ip_route', request.param == 'ip_specific_route', request.param == 'ip_special_route', - request.param == 'ipv6_route', request.param == 'ipv6_specific_route']): + request.param == 'ipv6_route', request.param == 'ipv6_specific_route', + request.param == 'ip_route_lc', request.param == 'ip_route_remote_lc']): bgp_util.run_bgp_command = mock.MagicMock( return_value=mock_run_bgp_route_command("", "")) elif request.param.startswith('ipv6_route_err'): @@ -303,6 +308,12 @@ def setup_multi_asic_bgp_instance(request): request.param.startswith('bgp_v4_neighbor') or \ request.param.startswith('bgp_v6_neighbor'): m_asic_json_file = request.param + elif request.param == 'ip_route_lc': + m_asic_json_file = 'ip_route_lc.json' + elif request.param == 'ip_route_remote_lc': + m_asic_json_file = 'ip_route_remote_lc.json' + elif request.param == 'ip_route_lc_2': + m_asic_json_file = 'ip_route_lc_2.json' else: m_asic_json_file = os.path.join( test_path, 'mock_tables', 'dummy.json') diff --git a/tests/counterpoll_test.py b/tests/counterpoll_test.py index 4a4da07ee9..6c165498c5 100644 --- a/tests/counterpoll_test.py +++ b/tests/counterpoll_test.py @@ -2,6 +2,7 @@ import json import os import pytest +import mock import sys from click.testing import CliRunner from shutil import copyfile @@ -31,6 +32,21 @@ FLOW_CNT_ROUTE_STAT 10000 enable """ +expected_counterpoll_show_dpu = """Type Interval (in ms) Status +-------------------- ------------------ -------- +QUEUE_STAT 10000 enable +PORT_STAT 1000 enable +PORT_BUFFER_DROP 60000 enable +QUEUE_WATERMARK_STAT default (60000) enable +PG_WATERMARK_STAT 
default (60000) enable +PG_DROP_STAT 10000 enable +ACL 5000 enable +TUNNEL_STAT 3000 enable +FLOW_CNT_TRAP_STAT 10000 enable +FLOW_CNT_ROUTE_STAT 10000 enable +ENI_STAT 1000 enable +""" + class TestCounterpoll(object): @classmethod def setup_class(cls): @@ -44,6 +60,13 @@ def test_show(self): print(result.output) assert result.output == expected_counterpoll_show + @mock.patch('counterpoll.main.device_info.get_platform_info') + def test_show_dpu(self, mock_get_platform_info): + mock_get_platform_info.return_value = {'switch_type': 'dpu'} + runner = CliRunner() + result = runner.invoke(counterpoll.cli.commands["show"], []) + assert result.output == expected_counterpoll_show_dpu + def test_port_buffer_drop_interval(self): runner = CliRunner() result = runner.invoke(counterpoll.cli.commands["port-buffer-drop"].commands["interval"], ["30000"]) @@ -221,6 +244,38 @@ def test_update_route_counter_interval(self): assert result.exit_code == 2 assert expected in result.output + @pytest.mark.parametrize("status", ["disable", "enable"]) + def test_update_eni_status(self, status): + runner = CliRunner() + result = runner.invoke(counterpoll.cli, ["eni", status]) + assert result.exit_code == 1 + assert result.output == "ENI counters are not supported on non DPU platforms\n" + + @pytest.mark.parametrize("status", ["disable", "enable"]) + @mock.patch('counterpoll.main.device_info.get_platform_info') + def test_update_eni_status_dpu(self, mock_get_platform_info, status): + mock_get_platform_info.return_value = {'switch_type': 'dpu'} + runner = CliRunner() + db = Db() + + result = runner.invoke(counterpoll.cli.commands["eni"].commands[status], [], obj=db.cfgdb) + assert result.exit_code == 0 + + table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') + assert status == table["ENI"]["FLEX_COUNTER_STATUS"] + + @mock.patch('counterpoll.main.device_info.get_platform_info') + def test_update_eni_interval(self, mock_get_platform_info): + mock_get_platform_info.return_value = {'switch_type': 'dpu'} 
+ runner = CliRunner() + db = Db() + test_interval = "2000" + + result = runner.invoke(counterpoll.cli.commands["eni"].commands["interval"], [test_interval], obj=db.cfgdb) + assert result.exit_code == 0 + + table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') + assert test_interval == table["ENI"]["POLL_INTERVAL"] @classmethod def teardown_class(cls): diff --git a/tests/ecn_input/ecn_test_vectors.py b/tests/ecn_input/ecn_test_vectors.py index c53bf48a24..fe47f0b7a3 100644 --- a/tests/ecn_input/ecn_test_vectors.py +++ b/tests/ecn_input/ecn_test_vectors.py @@ -18,205 +18,356 @@ """ +ecn_show_config_output_specific_namespace = """\ +Profile: AZURE_LOSSLESS +----------------------- ------- +red_max_threshold 2097152 +ecn ecn_all +green_min_threshold 1048576 +red_min_threshold 1048576 +yellow_min_threshold 1048576 +green_max_threshold 2097152 +green_drop_probability 5 +yellow_max_threshold 2097152 +yellow_drop_probability 5 +red_drop_probability 5 +----------------------- ------- + +""" + +ecn_show_config_output_multi = """\ +Profile: AZURE_LOSSLESS +----------------------- ------- +red_max_threshold 2097152 +ecn ecn_all +green_min_threshold 1048576 +red_min_threshold 1048576 +yellow_min_threshold 1048576 +green_max_threshold 2097152 +green_drop_probability 5 +yellow_max_threshold 2097152 +yellow_drop_probability 5 +red_drop_probability 5 +----------------------- ------- + +Profile: AZURE_LOSSY +----------------------- ----- +red_max_threshold 32760 +red_min_threshold 4095 +yellow_max_threshold 32760 +yellow_min_threshold 4095 +green_max_threshold 32760 +green_min_threshold 4095 +yellow_drop_probability 2 +----------------------- ----- + +""" + testData = { - 'ecn_show_config' : {'cmd' : ['show'], - 'args' : [], - 'rc' : 0, - 'rc_output': ecn_show_config_output + 'ecn_show_config': {'cmd': ['show'], + 'args': [], + 'rc': 0, + 'rc_output': ecn_show_config_output }, - 'ecn_show_config_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-l', '-vv'], - 'rc' : 0, - 'rc_output': 
ecn_show_config_output + 'Total profiles: 1\n' + 'ecn_show_config_verbose': {'cmd': ['q_cmd'], + 'args': ['-l', '-vv'], + 'rc': 0, + 'rc_output': ecn_show_config_output + 'Total profiles: 1\n' }, - 'ecn_cfg_gmin' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_min_threshold,1048600'] + 'ecn_cfg_gmin': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_min_threshold,1048600'] }, - 'ecn_cfg_gmin_verbose' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600', '-vv'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_min_threshold,1048600'], - 'rc_output' : 'Running command: ecnconfig -p AZURE_LOSSLESS -gmin 1048600 -vv\nSetting green_min_threshold value to 1048600\n' + 'ecn_cfg_gmin_verbose': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600', '-vv'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_min_threshold,1048600'], + 'rc_output': ('Running command: ecnconfig -p AZURE_LOSSLESS -gmin 1048600 -vv\n' + 'Setting green_min_threshold value to 1048600\n') }, - 'ecn_cfg_gmax' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmax', '2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_max_threshold,2097153'] + 'ecn_cfg_gmax': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_max_threshold,2097153'] }, - 'ecn_cfg_ymin' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ymin', '1048600'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,yellow_min_threshold,1048600'] + 'ecn_cfg_ymin': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ymin', '1048600'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,yellow_min_threshold,1048600'] }, - 'ecn_cfg_ymax' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ymax', 
'2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,yellow_max_threshold,2097153'] + 'ecn_cfg_ymax': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ymax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,yellow_max_threshold,2097153'] }, - 'ecn_cfg_rmin' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmin', '1048600'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,red_min_threshold,1048600'] + 'ecn_cfg_rmin': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmin', '1048600'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,red_min_threshold,1048600'] }, - 'ecn_cfg_rmax' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmax', '2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,red_max_threshold,2097153'] + 'ecn_cfg_rmax': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,red_max_threshold,2097153'] }, - 'ecn_cfg_rdrop' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rdrop', '10'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,red_drop_probability,10'] + 'ecn_cfg_rdrop': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rdrop', '10'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,red_drop_probability,10'] }, - 'ecn_cfg_ydrop' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ydrop', '11'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,yellow_drop_probability,11'] + 'ecn_cfg_ydrop': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ydrop', '11'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,yellow_drop_probability,11'] }, - 'ecn_cfg_gdrop' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_drop_probability,12'] + 'ecn_cfg_gdrop': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_drop_probability,12'] }, - 
'ecn_cfg_gdrop_verbose' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-vv'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_drop_probability,12'], - 'rc_output' : 'Running command: ecnconfig -p AZURE_LOSSLESS -gdrop 12 -vv\nSetting green_drop_probability value to 12%\n' + 'ecn_cfg_gdrop_verbose': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-vv'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_drop_probability,12'], + 'rc_output': ('Running command: ecnconfig -p AZURE_LOSSLESS -gdrop 12 -vv\n' + 'Setting green_drop_probability value to 12%\n') }, - 'ecn_cfg_multi_set' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-gmax', '2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_drop_probability,12', - 'AZURE_LOSSLESS,green_max_threshold,2097153' - ] + 'ecn_cfg_multi_set': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-gmax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_drop_probability,12', + ',AZURE_LOSSLESS,green_max_threshold,2097153'] }, - 'ecn_cfg_gmin_gmax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmax', '2097153', '-gmin', '2097154'], - 'rc' : 1, - 'rc_msg' : 'Invalid gmin (2097154) and gmax (2097153). gmin should be smaller than gmax' + 'ecn_cfg_gmin_gmax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmax', + '2097153', '-gmin', '2097154'], + 'rc': 1, + 'rc_msg': ('Invalid gmin (2097154) and gmax (2097153).' + ' gmin should be smaller than gmax') }, - 'ecn_cfg_ymin_ymax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ymax', '2097153', '-ymin', '2097154'], - 'rc' : 1, - 'rc_msg' : 'Invalid ymin (2097154) and ymax (2097153). 
ymin should be smaller than ymax' + 'ecn_cfg_ymin_ymax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ymax', + '2097153', '-ymin', '2097154'], + 'rc': 1, + 'rc_msg': ('Invalid ymin (2097154) and ymax (2097153).' + ' ymin should be smaller than ymax') }, - 'ecn_cfg_rmin_rmax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmax', '2097153', '-rmin', '2097154'], - 'rc' : 1, - 'rc_msg' : 'Invalid rmin (2097154) and rmax (2097153). rmin should be smaller than rmax' + 'ecn_cfg_rmin_rmax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmax', + '2097153', '-rmin', '2097154'], + 'rc': 1, + 'rc_msg': ('Invalid rmin (2097154) and rmax (2097153).' + ' rmin should be smaller than rmax') }, - 'ecn_cfg_rmax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmax', '-2097153'], - 'rc' : 1, - 'rc_msg' : 'Invalid rmax (-2097153). rmax should be an non-negative integer' - }, - 'ecn_cfg_rdrop_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rdrop', '105'], - 'rc' : 1, - 'rc_msg' : 'Invalid value for "-rdrop": 105 is not in the valid range of 0 to 100' + 'ecn_cfg_rmax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmax', '-2097153'], + 'rc': 1, + 'rc_msg': 'Invalid rmax (-2097153). 
rmax should be an non-negative integer' }, - 'ecn_q_get' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\nqueue 3: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_cfg_rdrop_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rdrop', '105'], + 'rc': 1, + 'rc_msg': 'Invalid value for "-rdrop": 105 is not in the valid range of 0 to 100' + }, + 'ecn_q_get': {'cmd': ['q_cmd'], + 'args': ['-q', '3'], + 'rc': 0, + 'rc_msg': 'ECN status:\nqueue 3: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_q_get_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3', '-vv'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\n{0} queue 3: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR' + 'ecn_q_get_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3', '-vv'], + 'rc': 0, + 'rc_msg': 'ECN status:\n{0} queue 3: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR' }, - 'ecn_lossy_q_get' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '2'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\nqueue 2: off\n', - 'cmp_args' : [None], - 'cmp_q_args' : ['2'] + 'ecn_lossy_q_get': {'cmd': ['q_cmd'], + 'args': ['-q', '2'], + 'rc': 0, + 'rc_msg': 'ECN status:\nqueue 2: off\n', + 'cmp_args': [',None,None'], + 'cmp_q_args': ['2'] }, - 'ecn_q_all_get_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', '-vv'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\n{0} queue 3: on\n{0} queue 4: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR' + 'ecn_q_all_get_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', '-vv'], + 'rc': 0, + 'rc_msg': 'ECN status:\n{0} queue 3: on\n{0} queue 4: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR' }, - 'ecn_q_all_get' 
: {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\nqueue 3: on\nqueue 4: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_q_all_get': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4'], + 'rc': 0, + 'rc_msg': 'ECN status:\nqueue 3: on\nqueue 4: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_cfg_q_all_off' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'off'], - 'rc' : 0, - 'cmp_args' : [None], - 'cmp_q_args' : ['3', '4'] - }, - 'ecn_cfg_q_all_off_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'off', '-vv'], - 'rc' : 0, - 'cmp_args' : [None], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Disable ECN on {0} queue 3\nDisable ECN on {0} queue 4' + 'ecn_cfg_q_all_off': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'off'], + 'rc': 0, + 'cmp_args': [',None,None'], + 'cmp_q_args': ['3', '4'] + }, + 'ecn_cfg_q_all_off_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'off', '-vv'], + 'rc': 0, + 'cmp_args': [',None,None'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR', + 'rc_msg': 'Disable ECN on {0} queue 3\nDisable ECN on {0} queue 4' }, - 'ecn_cfg_q_off' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3', 'off'], - 'rc' : 0, - 'cmp_args' : [None, 'wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3'], - 'other_q' : ['4'] + 'ecn_cfg_q_off': {'cmd': ['q_cmd'], + 'args': ['-q', '3', 'off'], + 'rc': 0, + 'cmp_args': [',None,None', ',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3'], + 'other_q': ['4'] }, - 'ecn_cfg_q_off_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3', 'off', '-vv'], - 'rc' : 0, - 'cmp_args' : [None, 'wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3'], - 'other_q' : ['4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Disable ECN on {0} queue 3' + 'ecn_cfg_q_off_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3', 'off', '-vv'], + 'rc': 0, + 'cmp_args': [',None,None', 
',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3'], + 'other_q': ['4'], + 'db_table': 'DEVICE_NEIGHBOR', + 'rc_msg': 'Disable ECN on {0} queue 3' }, - 'ecn_cfg_q_all_on' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'on'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_cfg_q_all_on': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'on'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_cfg_q_all_on_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'on', '-vv'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Enable ECN on {0} queue 3\nEnable ECN on {0} queue 4' + 'ecn_cfg_q_all_on_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'on', '-vv'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR', + 'rc_msg': 'Enable ECN on {0} queue 3\nEnable ECN on {0} queue 4' }, - 'ecn_cfg_q_on' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '4', 'on'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_cfg_q_on': {'cmd': ['q_cmd'], + 'args': ['-q', '4', 'on'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_cfg_q_on_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '4', 'on', '-vv'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Enable ECN on {0} queue 4' + 'ecn_cfg_q_on_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '4', 'on', '-vv'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR', + 'rc_msg': 'Enable ECN on {0} queue 4' }, - 'ecn_cfg_lossy_q_on' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '0,1,2,5,6,7', 'on'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' 
: ['0', '1', '2', '5', '6', '7'] - } + 'ecn_cfg_lossy_q_on': {'cmd': ['q_cmd'], + 'args': ['-q', '0,1,2,5,6,7', 'on'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['0', '1', '2', '5', '6', '7'] + }, + 'ecn_show_config_masic': {'cmd': ['show_masic'], + 'args': ['-l'], + 'rc': 0, + 'rc_output': ecn_show_config_output_multi, + }, + 'test_ecn_show_config_verbose_masic': {'cmd': ['show_masic'], + 'args': ['-l', '-vv'], + 'rc': 0, + 'rc_output': ecn_show_config_output_multi + 'Total profiles: 2\n', + }, + 'test_ecn_show_config_namespace': {'cmd': ['show_masic'], + 'args': ['-l', '-n', 'asic0'], + 'rc': 0, + 'rc_output': ecn_show_config_output_specific_namespace, + }, + 'test_ecn_show_config_namespace_verbose': {'cmd': ['show_masic'], + 'args': ['-l', '-n', 'asic0', '-vv'], + 'rc': 0, + 'rc_output': ecn_show_config_output_specific_namespace + + 'Total profiles: 1\n', + }, + 'ecn_cfg_threshold_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSY', '-gmax', '35000', '-n', 'asic1'], + 'rc': 0, + 'cmp_args': ['asic1,AZURE_LOSSY,green_max_threshold,35000'] + }, + 'ecn_cfg_probability_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSY', '-ydrop', '3', '-n', 'asic1'], + 'rc': 0, + 'cmp_args': ['asic1,AZURE_LOSSY,yellow_drop_probability,3'] + }, + 'ecn_cfg_gdrop_verbose_all_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSLESS', '-gdrop', '12', '-vv'], + 'rc': 0, + 'cmp_args': ['asic0-asic1,AZURE_LOSSLESS,green_drop_probability,12'], + 'rc_output': ('Setting green_drop_probability value to 12% ' + 'for namespace asic0\n' + 'Setting green_drop_probability value to 12% ' + 'for namespace asic1\n') + }, + 'ecn_cfg_multi_set_verbose_all_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSLESS', '-gdrop', + '14', '-gmax', '2097153', '-vv'], + 'rc': 0, + 'cmp_args': [('asic0-asic1,AZURE_LOSSLESS,' + 'green_drop_probability,14'), + ('asic0-asic1,AZURE_LOSSLESS,' + 'green_max_threshold,2097153')], + 
'rc_output': ('Setting green_max_threshold value to 2097153 ' + 'for namespace asic0\n' + 'Setting green_max_threshold value to 2097153 ' + 'for namespace asic1\n' + 'Setting green_drop_probability value to 14% ' + 'for namespace asic0\n' + 'Setting green_drop_probability value to 14% ' + 'for namespace asic1\n') + }, + 'ecn_q_get_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '1', '-n', 'asic0'], + 'rc': 0, + 'rc_msg': 'ECN status for namespace asic0:\nqueue 1: on\n', + 'cmp_args': ['asic0,wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['1'] + }, + 'ecn_q_get_verbose_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '1', '-vv', '-n', 'asic0'], + 'rc': 0, + 'rc_msg': 'ECN status for namespace asic0:\nEthernet4 queue 1: on\n', + 'cmp_args': ['asic0,wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['1'], + 'db_table': 'DEVICE_NEIGHBOR' + }, + 'ecn_q_get_all_ns_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '0'], + 'rc': 0, + 'rc_msg': ('ECN status for namespace asic0:\nqueue 0: off\n' + 'ECN status for namespace asic1:\nqueue 0: on\n') + }, + 'ecn_q_get_all_ns_verbose_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '0', '-vv'], + 'rc': 0, + 'rc_msg': ('ECN status for namespace asic0:\nEthernet4 queue 0: off\n' + 'ECN status for namespace asic1:\nEthernet0 queue 0: on\n') + }, + 'ecn_cfg_q_all_ns_off_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '0,1', 'off'], + 'rc': 0, + 'cmp_args': ['asic0-asic1,None,None'], + 'cmp_q_args': ['0', '1'] + }, + 'ecn_cfg_q_one_ns_off_verbose_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '1', 'on', '-n', 'asic1', '-vv'], + 'rc': 0, + 'rc_msg': 'Enable ECN on Ethernet0 queue 1\n', + 'cmp_args': ['asic1,wred_profile,AZURE_LOSSLESS', + 'asic1,wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['0'], + 'other_q': ['1'] + } } diff --git a/tests/ecn_test.py b/tests/ecn_test.py index 13474b12e8..5d2ac36011 100644 --- a/tests/ecn_test.py +++ b/tests/ecn_test.py @@ -6,11 +6,15 @@ from click.testing import CliRunner import config.main as config -from 
.ecn_input.ecn_test_vectors import * +from .ecn_input.ecn_test_vectors import testData from .utils import get_result_and_return_code from utilities_common.db import Db import show.main as show +# Constants +ARGS_DELIMITER = ',' +NAMESPACE_DELIMITER = '-' + test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) scripts_path = os.path.join(modules_path, "scripts") @@ -18,13 +22,107 @@ sys.path.insert(0, modules_path) -class TestEcnConfig(object): +class TestEcnConfigBase(object): @classmethod def setup_class(cls): + print("SETUP") os.environ["PATH"] += os.pathsep + scripts_path os.environ['UTILITIES_UNIT_TESTING'] = "2" - print("SETUP") + def process_cmp_args(self, cmp_args): + """ + The arguments are a string marked by delimiters + Arguments marked as 'None', are treated as None objects + First arg is always a collection of namespaces + """ + + args = cmp_args.split(ARGS_DELIMITER) + args = [None if arg == "None" else arg for arg in args] + args[0] = args[0].split(NAMESPACE_DELIMITER) + return args + + def verify_profile(self, queue_db_entry, profile, value): + if profile is not None: + assert queue_db_entry[profile] == value + else: + assert profile not in queue_db_entry,\ + "Profile needs to be fully removed from table to propagate NULL OID to SAI" + + def executor(self, input): + runner = CliRunner() + + if 'db_table' in input: + db = Db() + data_list = list(db.cfgdb.get_table(input['db_table'])) + input['rc_msg'] = input['rc_msg'].format(",".join(data_list)) + + if 'show' in input['cmd']: + exec_cmd = show.cli.commands["ecn"] + result = runner.invoke(exec_cmd, input['args']) + exit_code = result.exit_code + output = result.output + elif 'q_cmd' in input['cmd'] or 'show_masic' in input['cmd'] or 'config_masic' in input['cmd']: + exit_code, output = get_result_and_return_code(["ecnconfig"] + input['args']) + else: + exec_cmd = config.config.commands["ecn"] + result = runner.invoke(exec_cmd, input['args']) + exit_code = 
result.exit_code + output = result.output + + print(exit_code) + print(output) + + if input['rc'] == 0: + assert exit_code == 0 + else: + assert exit_code != 0 + + if 'cmp_args' in input: + fd = open('/tmp/ecnconfig', 'r') + cmp_data = json.load(fd) + + # Verify queue assignments + if 'cmp_q_args' in input: + namespaces, profile, value = self.process_cmp_args(input['cmp_args'][0]) + for namespace in namespaces: + for key in cmp_data[namespace]: + queue_idx = ast.literal_eval(key)[-1] + if queue_idx in input['cmp_q_args']: + self.verify_profile(cmp_data[namespace][key], profile, value) + + # other_q helps verify two different queue assignments + if 'other_q' in input: + namespaces1, profile1, value1 = self.process_cmp_args(input['cmp_args'][-1]) + for namespace1 in namespaces1: + for key in cmp_data[namespace1]: + queue_idx = ast.literal_eval(key)[-1] + if 'other_q' in input and queue_idx in input['other_q']: + self.verify_profile(cmp_data[namespace1][key], profile1, value1) + # Verify non-queue related assignments + else: + for args in input['cmp_args']: + namespaces, profile, name, value = self.process_cmp_args(args) + for namespace in namespaces: + assert(cmp_data[namespace][profile][name] == value) + fd.close() + + if 'rc_msg' in input: + assert input['rc_msg'] in output + + if 'rc_output' in input: + assert output == input['rc_output'] + + @classmethod + def teardown_class(cls): + os.environ['PATH'] = os.pathsep.join(os.environ['PATH'].split(os.pathsep)[:-1]) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + + if os.path.isfile('/tmp/ecnconfig'): + os.remove('/tmp/ecnconfig') + print("TEARDOWN") + + +class TestEcnConfig(TestEcnConfigBase): def test_ecn_show_config(self): self.executor(testData['ecn_show_config']) @@ -123,77 +221,3 @@ def test_ecn_queue_set_all_on_verbose(self): def test_ecn_queue_set_lossy_q_on(self): self.executor(testData['ecn_cfg_lossy_q_on']) - - def process_cmp_args(self, cmp_args): - if cmp_args is None: - return (None, None) - return 
cmp_args.split(',') - - def verify_profile(self, queue_db_entry, profile, value): - if profile != None: - assert queue_db_entry[profile] == value - else: - assert profile not in queue_db_entry,\ - "Profile needs to be fully removed from table to propagate NULL OID to SAI" - - def executor(self, input): - runner = CliRunner() - - if 'db_table' in input: - db = Db() - data_list = list(db.cfgdb.get_table(input['db_table'])) - input['rc_msg'] = input['rc_msg'].format(",".join(data_list)) - - if 'show' in input['cmd']: - exec_cmd = show.cli.commands["ecn"] - result = runner.invoke(exec_cmd, input['args']) - exit_code = result.exit_code - output = result.output - elif 'q_cmd' in input['cmd'] : - exit_code, output = get_result_and_return_code(["ecnconfig"] + input['args']) - else: - exec_cmd = config.config.commands["ecn"] - result = runner.invoke(exec_cmd, input['args']) - exit_code = result.exit_code - output = result.output - - print(exit_code) - print(output) - - if input['rc'] == 0: - assert exit_code == 0 - else: - assert exit_code != 0 - - if 'cmp_args' in input: - fd = open('/tmp/ecnconfig', 'r') - cmp_data = json.load(fd) - if 'cmp_q_args' in input: - profile, value = self.process_cmp_args(input['cmp_args'][0]) - if 'other_q' in input: - profile1, value1 = self.process_cmp_args(input['cmp_args'][-1]) - for key in cmp_data: - queue_idx = ast.literal_eval(key)[-1] - if queue_idx in input['cmp_q_args']: - self.verify_profile(cmp_data[key], profile, value) - if 'other_q' in input and queue_idx in input['other_q']: - self.verify_profile(cmp_data[key], profile1, value1) - else: - for args in input['cmp_args']: - profile, name, value = args.split(',') - assert(cmp_data[profile][name] == value) - fd.close() - - if 'rc_msg' in input: - assert input['rc_msg'] in output - - if 'rc_output' in input: - assert output == input['rc_output'] - - @classmethod - def teardown_class(cls): - os.environ['PATH'] = os.pathsep.join(os.environ['PATH'].split(os.pathsep)[:-1]) - 
os.environ['UTILITIES_UNIT_TESTING'] = "0" - if os.path.isfile('/tmp/ecnconfig'): - os.remove('/tmp/ecnconfig') - print("TEARDOWN") diff --git a/tests/flock_test.py b/tests/flock_test.py new file mode 100644 index 0000000000..7d9039dd2d --- /dev/null +++ b/tests/flock_test.py @@ -0,0 +1,187 @@ +import pytest +import tempfile +import threading +import time + +from unittest import mock +from utilities_common import flock + + +f0_exit = threading.Event() +f1_exit = threading.Event() +f2_exit = threading.Event() + + +def dummy_f0(): + while not f0_exit.is_set(): + time.sleep(1) + + +def dummy_f1(bypass_lock=False): + while not f1_exit.is_set(): + time.sleep(1) + + +def dummy_f2(bypass_lock=True): + while not f2_exit.is_set(): + time.sleep(1) + + +class TestFLock: + def setup(self): + print("SETUP") + f0_exit.clear() + f1_exit.clear() + f2_exit.clear() + + def test_flock_acquire_lock_non_blocking(self): + """Test flock non-blocking acquire lock.""" + with tempfile.NamedTemporaryFile() as fd0: + fd1 = open(fd0.name, "r") + + assert flock.acquire_flock(fd0.fileno(), 0) + assert not flock.acquire_flock(fd1.fileno(), 0) + + flock.release_flock(fd0.fileno()) + + assert flock.acquire_flock(fd1.fileno(), 0) + flock.release_flock(fd1.fileno()) + + def test_flock_acquire_lock_blocking(self): + """Test flock blocking acquire.""" + with tempfile.NamedTemporaryFile() as fd0: + fd1 = open(fd0.name, "r") + res = [] + + assert flock.acquire_flock(fd0.fileno(), 0) + thrd = threading.Thread(target=lambda: res.append(flock.acquire_flock(fd1.fileno(), -1))) + thrd.start() + + time.sleep(5) + assert thrd.is_alive() + + flock.release_flock(fd0.fileno()) + thrd.join() + assert len(res) == 1 and res[0] + + fd2 = open(fd0.name, "r") + assert not flock.acquire_flock(fd2.fileno(), 0) + + flock.release_flock(fd1.fileno()) + assert flock.acquire_flock(fd2.fileno(), 0) + flock.release_flock(fd2.fileno()) + + def test_flock_acquire_lock_timeout(self): + """Test flock timeout acquire.""" + with 
tempfile.NamedTemporaryFile() as fd0: + def acquire_helper(): + nonlocal elapsed + start = time.time() + res.append(flock.acquire_flock(fd1.fileno(), 5)) + end = time.time() + elapsed = end - start + + fd1 = open(fd0.name, "r") + elapsed = 0 + res = [] + + assert flock.acquire_flock(fd0.fileno(), 0) + thrd = threading.Thread(target=acquire_helper) + thrd.start() + + thrd.join() + assert ((len(res) == 1) and (not res[0])) + assert elapsed >= 5 + + flock.release_flock(fd0.fileno()) + + @mock.patch("click.echo") + def test_try_lock(self, mock_echo): + """Test try_lock decorator.""" + with tempfile.NamedTemporaryFile() as fd0: + def get_file_content(fd): + fd.seek(0) + return fd.read() + + f0_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f0) + f1_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f1) + + thrd = threading.Thread(target=f0_with_try_lock) + thrd.start() + time.sleep(2) + + try: + assert mock_echo.call_args_list == [mock.call(f"Acquired lock on {fd0.name}")] + assert b"dummy_f0" in get_file_content(fd0) + + with pytest.raises(SystemExit): + f1_with_try_lock() + assert mock_echo.call_args_list == [mock.call(f"Acquired lock on {fd0.name}"), + mock.call(f"Failed to acquire lock on {fd0.name}")] + finally: + f0_exit.set() + thrd.join() + + assert b"dummy_f0" not in get_file_content(fd0) + + thrd = threading.Thread(target=f1_with_try_lock) + thrd.start() + time.sleep(2) + + try: + assert mock_echo.call_args_list == [mock.call(f"Acquired lock on {fd0.name}"), + mock.call(f"Failed to acquire lock on {fd0.name}"), + mock.call(f"Released lock on {fd0.name}"), + mock.call(f"Acquired lock on {fd0.name}")] + assert b"dummy_f1" in get_file_content(fd0) + finally: + f1_exit.set() + thrd.join() + + assert b"dummy_f1" not in get_file_content(fd0) + + @mock.patch("click.echo") + def test_try_lock_with_bypass(self, mock_echo): + with tempfile.NamedTemporaryFile() as fd0: + def get_file_content(fd): + fd.seek(0) + return fd.read() + + f1_with_try_lock = 
flock.try_lock(fd0.name, timeout=0)(dummy_f1) + + thrd = threading.Thread(target=f1_with_try_lock, args=(True,)) + thrd.start() + time.sleep(2) + + try: + assert mock_echo.call_args_list == [mock.call(f"Bypass lock on {fd0.name}")] + assert b"dummy_f1" not in get_file_content(fd0) + finally: + f1_exit.set() + thrd.join() + + @mock.patch("click.echo") + def test_try_lock_with_bypass_default(self, mock_echo): + with tempfile.NamedTemporaryFile() as fd0: + def get_file_content(fd): + fd.seek(0) + return fd.read() + + f2_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f2) + + thrd = threading.Thread(target=f2_with_try_lock) + thrd.start() + time.sleep(2) + + try: + assert mock_echo.call_args_list == [mock.call(f"Bypass lock on {fd0.name}")] + assert b"dummy_f2" not in get_file_content(fd0) + finally: + f2_exit.set() + thrd.join() + + def teardown(self): + print("TEARDOWN") + f0_exit.clear() + f1_exit.clear() + f2_exit.clear() diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index 4a16a5ca4f..21f50e0b7b 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -361,6 +361,13 @@ def test_validate_lanes__same_valid_lanes_multi_ports_no_spaces__failure(self): }} self.validate_lanes(config, '67') + def test_validate_lanes_default_value_duplicate_check(self): + config = {"PORT": { + "Ethernet0": {"lanes": "0", "speed": "10000"}, + "Ethernet1": {"lanes": "0", "speed": "10000"}, + }} + self.validate_lanes(config) + def validate_lanes(self, config_db, expected_error=None): # Arrange config_wrapper = gu_common.ConfigWrapper() diff --git a/tests/installer_bootloader_aboot_test.py b/tests/installer_bootloader_aboot_test.py index fbe580a638..be09223b5f 100644 --- a/tests/installer_bootloader_aboot_test.py +++ b/tests/installer_bootloader_aboot_test.py @@ -8,6 +8,7 @@ # Constants image_dir = 
f'{aboot.IMAGE_DIR_PREFIX}expeliarmus-{aboot.IMAGE_DIR_PREFIX}abcde' +image_chainloader = f'{image_dir}/.sonic-boot.swi' exp_image = f'{aboot.IMAGE_PREFIX}expeliarmus-{aboot.IMAGE_DIR_PREFIX}abcde' image_dirs = [image_dir] @@ -45,15 +46,27 @@ def test_get_installed_images(): assert bootloader.get_installed_images() == [exp_image] -@patch("sonic_installer.bootloader.aboot.re.search") -def test_get_next_image(re_search_patch): +def test_get_next_image(): bootloader = aboot.AbootBootloader() - bootloader._boot_config_read = Mock(return_value={'SWI': None}) + + # Test missing boot-config + bootloader._boot_config_read() + + # Test missing SWI value + bootloader._boot_config_read = Mock(return_value={}) + assert bootloader.get_next_image() == '' # Test convertion image dir to image name - re_search_patch().group = Mock(return_value=image_dir) + swi = f'flash:{image_chainloader}' + bootloader._boot_config_read = Mock(return_value={'SWI': swi}) assert bootloader.get_next_image() == exp_image + # Test some other image + next_image = 'EOS.swi' + bootloader._boot_config_read = Mock(return_value={'SWI': f'flash:{next_image}'}) + assert bootloader.get_next_image() == next_image + + def test_install_image(): image_path = 'sonic' env = os.environ.copy() diff --git a/tests/ip_show_routes_multi_asic_test.py b/tests/ip_show_routes_multi_asic_test.py index bfce5e539d..08bea36910 100644 --- a/tests/ip_show_routes_multi_asic_test.py +++ b/tests/ip_show_routes_multi_asic_test.py @@ -1,10 +1,11 @@ import os from importlib import reload - import pytest + from . 
import show_ip_route_common from click.testing import CliRunner + test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) scripts_path = os.path.join(modules_path, "scripts") diff --git a/tests/ip_show_routes_voq_chassis_test.py b/tests/ip_show_routes_voq_chassis_test.py new file mode 100644 index 0000000000..de7f7ade8f --- /dev/null +++ b/tests/ip_show_routes_voq_chassis_test.py @@ -0,0 +1,112 @@ +import os +from importlib import reload +import pytest +from unittest import mock + +import show.main as show +from . import show_ip_route_common +import utilities_common.multi_asic as multi_asic_util +from click.testing import CliRunner + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") + + +class TestMultiAsicVoqLcShowIpRouteDisplayAllCommands(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + from .mock_tables import mock_multi_asic + reload(mock_multi_asic) + from .mock_tables import dbconnector + dbconnector.load_namespace_config() + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_lc'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_lc( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["-dfrontend"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_LC + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_remote_lc'], indirect=['setup_multi_asic_bgp_instance']) + 
@mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_remote_lc( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["-dfrontend"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_REMOTE_LC + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_lc'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_lc_def_route( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["0.0.0.0/0"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_LC_DEFAULT_ROUTE + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_remote_lc'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_remote_lc_default_route( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["0.0.0.0/0"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_REMOTE_LC_DEFAULT_ROUTE + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_lc_2'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + @mock.patch.object(multi_asic_util.MultiAsic, "get_ns_list_based_on_options", + mock.MagicMock(return_value=["asic0", "asic1"])) + def 
test_voq_chassis_lc_def_route_2( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["0.0.0.0/0"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_LC_DEFAULT_ROUTE_2 + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + from .mock_tables import mock_single_asic + reload(mock_single_asic) diff --git a/tests/mmuconfig_input/mmuconfig_test_vectors.py b/tests/mmuconfig_input/mmuconfig_test_vectors.py index c20a964516..1d72ed6725 100644 --- a/tests/mmuconfig_input/mmuconfig_test_vectors.py +++ b/tests/mmuconfig_input/mmuconfig_test_vectors.py @@ -83,30 +83,267 @@ """ +show_mmu_config_asic0 = """\ +Pool for namespace asic0: ingress_lossy_pool +---- ------- +mode dynamic +type ingress +---- ------- + +Pool for namespace asic0: ingress_lossless_pool_hbm +---- --------- +mode static +size 139458240 +type ingress +---- --------- + +Profile for namespace asic0: ingress_lossy_profile +---------- ------------------ +dynamic_th 3 +pool ingress_lossy_pool +size 0 +---------- ------------------ + +Profile for namespace asic0: ingress_lossless_profile_hbm +--------- ------------------------- +static_th 12121212 +pool ingress_lossless_pool_hbm +size 0 +--------- ------------------------- + +""" + +show_mmu_config_asic1_verbose = """\ +Pool for namespace asic1: ingress_lossless_pool +---- ------- +mode dynamic +type ingress +---- ------- + +Pool for namespace asic1: egress_lossless_pool +---- -------- +mode dynamic +size 13945824 +type egress +---- -------- + +Pool for namespace asic1: egress_lossy_pool +---- ------- +mode dynamic +type egress +---- ------- + +Total pools: 3 + + +Profile for namespace 
asic1: alpha_profile +------------- --------------------- +dynamic_th 0 +pool ingress_lossless_pool +headroom_type dynamic +------------- --------------------- + +Profile for namespace asic1: headroom_profile +---------- --------------------- +dynamic_th 0 +pool ingress_lossless_pool +xon 18432 +xoff 32768 +size 51200 +---------- --------------------- + +Profile for namespace asic1: egress_lossless_profile +---------- -------------------- +dynamic_th 0 +pool egress_lossless_pool +size 0 +---------- -------------------- + +Profile for namespace asic1: egress_lossy_profile +---------- ----------------- +dynamic_th 0 +pool egress_lossy_pool +size 0 +---------- ----------------- + +Total profiles: 4 +""" + +show_mmu_config_all_masic = """\ +Pool for namespace asic0: ingress_lossy_pool +---- ------- +mode dynamic +type ingress +---- ------- + +Pool for namespace asic0: ingress_lossless_pool_hbm +---- --------- +mode static +size 139458240 +type ingress +---- --------- + +Profile for namespace asic0: ingress_lossy_profile +---------- ------------------ +dynamic_th 3 +pool ingress_lossy_pool +size 0 +---------- ------------------ + +Profile for namespace asic0: ingress_lossless_profile_hbm +--------- ------------------------- +static_th 12121212 +pool ingress_lossless_pool_hbm +size 0 +--------- ------------------------- + +Pool for namespace asic1: ingress_lossless_pool +---- ------- +mode dynamic +type ingress +---- ------- + +Pool for namespace asic1: egress_lossless_pool +---- -------- +mode dynamic +size 13945824 +type egress +---- -------- + +Pool for namespace asic1: egress_lossy_pool +---- ------- +mode dynamic +type egress +---- ------- + +Profile for namespace asic1: alpha_profile +------------- --------------------- +dynamic_th 0 +pool ingress_lossless_pool +headroom_type dynamic +------------- --------------------- + +Profile for namespace asic1: headroom_profile +---------- --------------------- +dynamic_th 0 +pool ingress_lossless_pool +xon 18432 +xoff 32768 
+size 51200 +---------- --------------------- + +Profile for namespace asic1: egress_lossless_profile +---------- -------------------- +dynamic_th 0 +pool egress_lossless_pool +size 0 +---------- -------------------- + +Profile for namespace asic1: egress_lossy_profile +---------- ----------------- +dynamic_th 0 +pool egress_lossy_pool +size 0 +---------- ----------------- + +""" + testData = { 'mmuconfig_list' : {'cmd' : ['show'], 'args' : [], 'rc' : 0, 'rc_output': show_mmu_config }, - 'mmu_cfg_static_th' : {'cmd' : ['config'], - 'args' : ['-p', 'ingress_lossless_profile_hbm', '-s', '12121213'], - 'rc' : 0, - 'db_table' : 'BUFFER_PROFILE', - 'cmp_args' : ['ingress_lossless_profile_hbm,static_th,12121213'], - 'rc_msg' : '' - }, + 'mmu_cfg_static_th': {'cmd': ['config'], + 'args': ['-p', 'ingress_lossless_profile_hbm', '-s', '12121213'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': [',ingress_lossless_profile_hbm,static_th,12121213'], + 'rc_msg': '' + }, 'mmu_cfg_alpha' : {'cmd' : ['config'], 'args' : ['-p', 'alpha_profile', '-a', '2'], 'rc' : 0, 'db_table' : 'BUFFER_PROFILE', - 'cmp_args' : ['alpha_profile,dynamic_th,2'], + 'cmp_args': [',alpha_profile,dynamic_th,2'], 'rc_msg' : '' }, - 'mmu_cfg_alpha_invalid' : {'cmd' : ['config'], - 'args' : ['-p', 'alpha_profile', '-a', '12'], - 'rc' : 2, - 'rc_msg' : 'Usage: mmu [OPTIONS]\nTry "mmu --help" for help.\n\nError: Invalid value for "-a": 12 is not in the valid range of -8 to 8.\n' - } - + 'mmu_cfg_alpha_invalid': {'cmd': ['config'], + 'args': ['-p', 'alpha_profile', '-a', '12'], + 'rc': 2, + 'rc_msg': ('Usage: mmu [OPTIONS]\nTry "mmu --help" for help.\n' + '\nError: Invalid value for "-a": 12 is not in the ' + 'valid range of -8 to 8.\n') + }, + 'mmu_cfg_list_one_masic': {'cmd': ['show'], + 'args': ['-n', 'asic0'], + 'rc': 0, + 'rc_output': show_mmu_config_asic0 + }, + 'mmu_cfg_list_one_verbose_masic': {'cmd': ['show'], + 'args': ['-n', 'asic1', '-vv'], + 'rc': 0, + 'rc_output': 
show_mmu_config_asic1_verbose + }, + 'mmu_cfg_list_all_masic': {'cmd': ['show'], + 'args': [], + 'rc': 0, + 'rc_output': show_mmu_config_all_masic + }, + 'mmu_cfg_alpha_one_masic': {'cmd': ['config'], + 'args': ['-p', 'alpha_profile', '-a', '2', '-n', 'asic0'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': ['asic0,alpha_profile,dynamic_th,2'], + 'rc_msg': '' + }, + 'mmu_cfg_alpha_all_verbose_masic': {'cmd': ['config'], + 'args': ['-p', 'alpha_profile', '-a', '2', '-vv'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': ['asic0,alpha_profile,dynamic_th,2', + 'asic1,alpha_profile,dynamic_th,2'], + 'rc_msg': ('Setting alpha_profile dynamic_th value ' + 'to 2 for namespace asic0\n' + 'Setting alpha_profile dynamic_th value ' + 'to 2 for namespace asic1\n') + }, + 'mmu_cfg_static_th_one_masic': {'cmd': ['config'], + 'args': ['-p', 'ingress_lossless_profile_hbm', + '-s', '12121215', '-n', 'asic0'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': ['asic0,ingress_lossless_profile_hbm,static_th,12121215'], + 'rc_msg': '' + }, + 'mmu_cfg_static_th_all_verbose_masic': {'cmd': ['config'], + 'args': ['-p', 'ingress_lossless_profile_hbm', + '-s', '12121214', '-vv'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': [('asic0,ingress_lossless_profile_hbm,' + 'static_th,12121214'), + ('asic1,ingress_lossless_profile_hbm,' + 'static_th,12121214')], + 'rc_msg': ('Setting ingress_lossless_profile_hbm static_th ' + 'value to 12121214 for namespace asic0\n' + 'Setting ingress_lossless_profile_hbm static_th ' + 'value to 12121214 for namespace asic1\n') + }, + 'mmu_cfg_alpha_invalid_masic': {'cmd': ['config'], + 'args': ['-p', 'alpha_profile', '-a', '12'], + 'rc': 2, + 'rc_msg': ('Usage: mmu [OPTIONS]\n' + 'Try "mmu --help" for help.\n\n' + 'Error: Invalid value for "-a": 12 ' + 'is not in the valid range of -8 to 8.\n') + }, + 'mmu_cfg_static_th_invalid_masic': {'cmd': ['config'], + 'args': ['-p', 'ingress_lossless_profile_hbm', '-s', '-1'], + 'rc': 2, 
+ 'rc_msg': ('Usage: mmu [OPTIONS]\n' + 'Try "mmu --help" for help.\n\n' + 'Error: Invalid value for "-s": ' + '-1 is smaller than the minimum valid value 0.\n') + } } diff --git a/tests/mmuconfig_test.py b/tests/mmuconfig_test.py index 7218270e36..03a849eed5 100644 --- a/tests/mmuconfig_test.py +++ b/tests/mmuconfig_test.py @@ -7,7 +7,7 @@ import config.main as config import show.main as show from utilities_common.db import Db -from .mmuconfig_input.mmuconfig_test_vectors import * +from .mmuconfig_input.mmuconfig_test_vectors import testData test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -16,24 +16,12 @@ sys.path.insert(0, modules_path) -class Testmmuconfig(object): +class TestMmuConfigBase(object): @classmethod def setup_class(cls): + print('SETUP') os.environ["PATH"] += os.pathsep + scripts_path os.environ['UTILITIES_UNIT_TESTING'] = "2" - print("SETUP") - - def test_mmu_show_config(self): - self.executor(testData['mmuconfig_list']) - - def test_mmu_alpha_config(self): - self.executor(testData['mmu_cfg_alpha']) - - def test_mmu_alpha_invalid_config(self): - self.executor(testData['mmu_cfg_alpha_invalid']) - - def test_mmu_staticth_config(self): - self.executor(testData['mmu_cfg_static_th']) def executor(self, input): runner = CliRunner() @@ -48,6 +36,7 @@ def executor(self, input): result = runner.invoke(exec_cmd, input['args']) exit_code = result.exit_code output = result.output + elif 'config' in input['cmd']: exec_cmd = config.config.commands["mmu"] result = runner.invoke(exec_cmd, input['args'], catch_exceptions=False) @@ -66,8 +55,8 @@ def executor(self, input): fd = open('/tmp/mmuconfig', 'r') cmp_data = json.load(fd) for args in input['cmp_args']: - profile, name, value = args.split(',') - assert(cmp_data[profile][name] == value) + namespace, profile, name, value = args.split(',') + assert(cmp_data[namespace][profile][name] == value) fd.close() if 'rc_msg' in input: @@ -76,7 +65,6 @@ def executor(self, 
input): if 'rc_output' in input: assert output == input['rc_output'] - @classmethod def teardown_class(cls): os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) @@ -84,3 +72,17 @@ def teardown_class(cls): if os.path.isfile('/tmp/mmuconfig'): os.remove('/tmp/mmuconfig') print("TEARDOWN") + + +class TestMmuConfig(TestMmuConfigBase): + def test_mmu_show_config(self): + self.executor(testData['mmuconfig_list']) + + def test_mmu_alpha_config(self): + self.executor(testData['mmu_cfg_alpha']) + + def test_mmu_alpha_invalid_config(self): + self.executor(testData['mmu_cfg_alpha_invalid']) + + def test_mmu_staticth_config(self): + self.executor(testData['mmu_cfg_static_th']) diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json index 8b867bdc96..593170630f 100644 --- a/tests/mock_tables/asic0/config_db.json +++ b/tests/mock_tables/asic0/config_db.json @@ -303,5 +303,47 @@ "SYSLOG_CONFIG_FEATURE|database": { "rate_limit_interval": "222", "rate_limit_burst": "22222" + }, + "WRED_PROFILE|AZURE_LOSSLESS": { + "red_max_threshold": "2097152", + "ecn": "ecn_all", + "green_min_threshold": "1048576", + "red_min_threshold": "1048576", + "yellow_min_threshold": "1048576", + "green_max_threshold": "2097152", + "green_drop_probability": "5", + "yellow_max_threshold": "2097152", + "yellow_drop_probability": "5", + "red_drop_probability": "5" + }, + "DEVICE_NEIGHBOR|Ethernet4": { + "name": "Serverss0", + "port": "eth0" + }, + "QUEUE|Ethernet4|0": { + "scheduler": "[SCHEDULAR|scheduler.0]" + }, + "QUEUE|Ethernet4|1": { + "scheduler": "[SCHEDULAR|scheduler.0]", + "wred_profile": "AZURE_LOSSLESS" + }, + "BUFFER_POOL|ingress_lossy_pool": { + "mode": "dynamic", + "type": "ingress" + }, + "BUFFER_POOL|ingress_lossless_pool_hbm": { + "mode": "static", + "size": "139458240", + "type": "ingress" + }, + "BUFFER_PROFILE|ingress_lossy_profile": { + "dynamic_th": "3", + "pool": "ingress_lossy_pool", + "size": "0" + }, + 
"BUFFER_PROFILE|ingress_lossless_profile_hbm": { + "static_th": "12121212", + "pool": "ingress_lossless_pool_hbm", + "size": "0" } } diff --git a/tests/mock_tables/asic0/counters_db.json b/tests/mock_tables/asic0/counters_db.json index 53e3b558a2..610662a019 100644 --- a/tests/mock_tables/asic0/counters_db.json +++ b/tests/mock_tables/asic0/counters_db.json @@ -2202,14 +2202,14 @@ "oid:0x1000000004005": "SAI_QUEUE_TYPE_UNICAST", "oid:0x1000000004006": "SAI_QUEUE_TYPE_UNICAST", "oid:0x1000000004007": "SAI_QUEUE_TYPE_UNICAST", - "oid:0x1000000004008": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004009": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004010": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004011": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004012": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004013": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004014": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004015": "SAI_QUEUE_TYPE_MULTICAST" + "oid:0x1000000004008": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004009": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004010": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004011": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004012": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004013": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004014": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004015": "SAI_QUEUE_TYPE_ALL" }, "COUNTERS_FABRIC_PORT_NAME_MAP" : { "PORT0": "oid:0x1000000000143", @@ -2489,5 +2489,302 @@ "COUNTERS:oid:0x1600000000034d":{ "SAI_COUNTER_STAT_PACKETS": 200, "SAI_COUNTER_STAT_BYTES": 4000 + }, + "COUNTERS_BUFFER_POOL_NAME_MAP": { + "ingress_lossless_pool": "oid:0x18000000000c10" + }, + "COUNTERS_PG_NAME_MAP": { + "Enternet0:0": "oid:100000000b0f0", + "Enternet0:1": "oid:100000000b0f1", + "Enternet0:2": "oid:100000000b0f2", + "Enternet0:3": "oid:100000000b0f3", + "Enternet0:4": "oid:100000000b0f4", + "Enternet0:5": "oid:100000000b0f5", + "Enternet0:6": "oid:100000000b0f6", + "Enternet0:7": "oid:100000000b0f7", + "Enternet0:8": "oid:100000000b0f8", + 
"Enternet0:9": "oid:100000000b0f9", + "Enternet0:10": "oid:100000000b0fa", + "Enternet0:11": "oid:100000000b0fb", + "Enternet0:12": "oid:100000000b0fc", + "Enternet0:13": "oid:100000000b0fd", + "Enternet0:14": "oid:100000000b0fe", + "Enternet0:15": "oid:100000000b0ff", + "Enternet4:0": "oid:0x100000000b1f0", + "Enternet4:1": "oid:0x100000000b1f1", + "Enternet4:2": "oid:0x100000000b1f2", + "Enternet4:3": "oid:0x100000000b1f3", + "Enternet4:4": "oid:0x100000000b1f4", + "Enternet4:5": "oid:0x100000000b1f5", + "Enternet4:6": "oid:0x100000000b1f6", + "Enternet4:7": "oid:0x100000000b1f7", + "Enternet4:8": "oid:0x100000000b1f8", + "Enternet4:9": "oid:0x100000000b1f9", + "Enternet4:10": "oid:0x100000000b1fa", + "Enternet4:11": "oid:0x100000000b1fb", + "Enternet4:12": "oid:0x100000000b1fc", + "Enternet4:13": "oid:0x100000000b1fd", + "Enternet4:14": "oid:0x100000000b1fe", + "Enternet4:15": "oid:0x100000000b1ff" + }, + "COUNTERS_PG_PORT_MAP": { + "oid:100000000b0f0": "oid:0x1000000000002", + "oid:100000000b0f1": "oid:0x1000000000002", + "oid:100000000b0f2": "oid:0x1000000000002", + "oid:100000000b0f3": "oid:0x1000000000002", + "oid:100000000b0f4": "oid:0x1000000000002", + "oid:100000000b0f5": "oid:0x1000000000002", + "oid:100000000b0f6": "oid:0x1000000000002", + "oid:100000000b0f7": "oid:0x1000000000002", + "oid:100000000b0f8": "oid:0x1000000000002", + "oid:100000000b0f9": "oid:0x1000000000002", + "oid:100000000b0fa": "oid:0x1000000000002", + "oid:100000000b0fb": "oid:0x1000000000002", + "oid:100000000b0fc": "oid:0x1000000000002", + "oid:100000000b0fd": "oid:0x1000000000002", + "oid:100000000b0fe": "oid:0x1000000000002", + "oid:100000000b0ff": "oid:0x1000000000002", + "oid:0x100000000b1f0": "oid:0x1000000000004", + "oid:0x100000000b1f1": "oid:0x1000000000004", + "oid:0x100000000b1f2": "oid:0x1000000000004", + "oid:0x100000000b1f3": "oid:0x1000000000004", + "oid:0x100000000b1f4": "oid:0x1000000000004", + "oid:0x100000000b1f5": "oid:0x1000000000004", + "oid:0x100000000b1f6": 
"oid:0x1000000000004", + "oid:0x100000000b1f7": "oid:0x1000000000004", + "oid:0x100000000b1f8": "oid:0x1000000000004", + "oid:0x100000000b1f9": "oid:0x1000000000004", + "oid:0x100000000b1fa": "oid:0x1000000000004", + "oid:0x100000000b1fb": "oid:0x1000000000004", + "oid:0x100000000b1fc": "oid:0x1000000000004", + "oid:0x100000000b1fd": "oid:0x1000000000004", + "oid:0x100000000b1fe": "oid:0x1000000000004", + "oid:0x100000000b1ff" : "oid:0x1000000000004" + }, + "COUNTERS_PG_INDEX_MAP": { + "oid:100000000b0f0": "0", + "oid:100000000b0f1": "1", + "oid:100000000b0f2": "2", + "oid:100000000b0f3": "3", + "oid:100000000b0f4": "4", + "oid:100000000b0f5": "5", + "oid:100000000b0f6": "6", + "oid:100000000b0f7": "7", + "oid:100000000b0f8": "8", + "oid:100000000b0f9": "9", + "oid:100000000b0fa": "10", + "oid:100000000b0fb": "11", + "oid:100000000b0fc": "12", + "oid:100000000b0fd": "13", + "oid:100000000b0fe": "14", + "oid:100000000b0ff": "15", + "oid:0x100000000b1f0": "0", + "oid:0x100000000b1f1": "1", + "oid:0x100000000b1f2": "2", + "oid:0x100000000b1f3": "3", + "oid:0x100000000b1f4": "4", + "oid:0x100000000b1f5": "5", + "oid:0x100000000b1f6": "6", + "oid:0x100000000b1f7": "7", + "oid:0x100000000b1f8": "8", + "oid:0x100000000b1f9": "9", + "oid:0x100000000b1fa": "10", + "oid:0x100000000b1fb": "11", + "oid:0x100000000b1fc": "12", + "oid:0x100000000b1fd": "13", + "oid:0x100000000b1fe": "14", + "oid:0x100000000b1ff" : "15" + }, + "USER_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 100, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 100 + }, + "USER_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 101, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 101 + }, + "USER_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 102, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 102 + }, + 
"USER_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 103, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 103 + }, + "USER_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 104, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 104 + }, + "USER_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 105, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 105 + }, + "USER_WATERMARKS:oid:100000000b0f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 106, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 106 + }, + "USER_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 107, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 107 + }, + "USER_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 108, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 108 + }, + "USER_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 109, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 109 + }, + "USER_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 110, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 110 + }, + "USER_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 111, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 111 + }, + "USER_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 112, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 112 + }, + "USER_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 113, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 113 + }, + "USER_WATERMARKS:oid:100000000b0fe": { + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 114, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 114 + }, + "USER_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 115, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 115 + }, + "USER_WATERMARKS:oid:0x100000000b1f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 200, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 200 + }, + "USER_WATERMARKS:oid:0x100000000b1f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 201, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 201 + }, + "USER_WATERMARKS:oid:0x100000000b1f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 202, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 202 + }, + "USER_WATERMARKS:oid:0x100000000b1f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 203, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 203 + }, + "USER_WATERMARKS:oid:0x100000000b1f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 204, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 204 + }, + "USER_WATERMARKS:oid:0x100000000b1f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 205, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 205 + }, + "USER_WATERMARKS:oid:0x100000000b1f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 206, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 206 + }, + "USER_WATERMARKS:oid:0x100000000b1f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 207, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 207 + }, + "USER_WATERMARKS:oid:0x100000000b1f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 208, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 208 + }, + "USER_WATERMARKS:oid:0x100000000b1f9": { + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 209, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 209 + }, + "USER_WATERMARKS:oid:0x100000000b1fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 210, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 210 + }, + "USER_WATERMARKS:oid:0x100000000b1fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 211, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 211 + }, + "USER_WATERMARKS:oid:0x100000000b1fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 212, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 212 + }, + "USER_WATERMARKS:oid:0x100000000b1fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 213, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 213 + }, + "USER_WATERMARKS:oid:0x100000000b1fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 214, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 214 + }, + "USER_WATERMARKS:oid:0x100000000b1ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 215, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 215 + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "200", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "200" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "201", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "201" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "202", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "202" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "203", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "203" + }, + 
"PERSISTENT_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "204", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "204" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "205", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "205" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "206", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "206" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "207", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "207" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "500", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "500" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "501", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "501" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "502", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "502" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "503", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "503" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "504", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "504" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "505", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "505" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "506", + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "506" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "507", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "507" } } diff --git a/tests/mock_tables/asic0/database_config.json b/tests/mock_tables/asic0/database_config.json index d3028b0b45..5fca7834f6 100644 --- a/tests/mock_tables/asic0/database_config.json +++ b/tests/mock_tables/asic0/database_config.json @@ -4,6 +4,11 @@ "hostname" : "127.0.0.1", "port" : 6379, "unix_socket_path" : "/var/run/redis/redis.sock" + }, + "redis_bmp": { + "hostname" : "127.0.0.1", + "port" : 6400, + "unix_socket_path" : "/var/run/redis/redis_bmp.sock" } }, "DATABASES" : { @@ -51,6 +56,11 @@ "id" : 7, "separator": "|", "instance" : "redis" + }, + "BMP_STATE_DB" : { + "id" : 20, + "separator": "|", + "instance" : "redis_bmp" } }, "VERSION" : "1.1" diff --git a/tests/mock_tables/asic0/ip_route_lc.json b/tests/mock_tables/asic0/ip_route_lc.json new file mode 100644 index 0000000000..19cfd5e5f0 --- /dev/null +++ b/tests/mock_tables/asic0/ip_route_lc.json @@ -0,0 +1,66 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 4, + "internalNextHopActiveNum": 4, + "nexthopGroupId": 566, + "installedNexthopGroupId": 566, + "uptime": "04w0d11h", + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 2, + "interfaceName": "PortChannel1", + "active": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 4, + "interfaceName": "PortChannel5", + "active": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "20.1.16.128", + "afi": 
"ipv4", + "interfaceIndex": 5, + "interfaceName": "PortChannel9", + "active": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 3, + "interfaceName": "PortChannel13", + "active": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic0/ip_route_lc_2.json b/tests/mock_tables/asic0/ip_route_lc_2.json new file mode 100644 index 0000000000..8cadf1db22 --- /dev/null +++ b/tests/mock_tables/asic0/ip_route_lc_2.json @@ -0,0 +1,56 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 3, + "internalNextHopActiveNum": 3, + "nexthopGroupId": 2122, + "installedNexthopGroupId": 2122, + "uptime": "01:01:51", + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "10.0.0.1", + "afi": "ipv4", + "interfaceIndex": 29, + "interfaceName": "PortChannel102", + "active": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "10.0.0.7", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "10.0.0.7", + "afi": "ipv4", + "interfaceIndex": 52, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic0/ip_route_remote_lc.json b/tests/mock_tables/asic0/ip_route_remote_lc.json new file mode 100644 index 0000000000..0e8f4a56c7 --- /dev/null +++ b/tests/mock_tables/asic0/ip_route_remote_lc.json @@ -0,0 +1,106 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 200, + "metric": 0, + "installed": true, + 
"table": 254, + "internalStatus": 16, + "internalFlags": 13, + "internalNextHopNum": 8, + "internalNextHopActiveNum": 8, + "nexthopGroupId": 465, + "installedNexthopGroupId": 465, + "uptime": "04w0d12h", + "nexthops": [ + { + "flags": 5, + "ip": "20.1.0.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.8.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.16.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.24.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic1/config_db.json b/tests/mock_tables/asic1/config_db.json index 56823ae113..5c1d9f344c 100644 --- a/tests/mock_tables/asic1/config_db.json +++ b/tests/mock_tables/asic1/config_db.json @@ -242,5 +242,60 @@ "SYSLOG_CONFIG_FEATURE|database": { "rate_limit_interval": "555", "rate_limit_burst": "55555" + }, + "WRED_PROFILE|AZURE_LOSSY": { + "red_max_threshold":"32760", + "red_min_threshold":"4095", + 
"yellow_max_threshold":"32760", + "yellow_min_threshold":"4095", + "green_max_threshold": "32760", + "green_min_threshold": "4095", + "yellow_drop_probability": "2" + }, + "DEVICE_NEIGHBOR|Ethernet0": { + "name": "Servers", + "port": "eth0" + }, + "QUEUE|Ethernet0|0": { + "scheduler": "[SCHEDULAR|scheduler.0]", + "wred_profile": "AZURE_LOSSLESS" + }, + "QUEUE|Ethernet0|1": { + "scheduler": "[SCHEDULAR|scheduler.0]" + }, + "BUFFER_POOL|ingress_lossless_pool": { + "mode": "dynamic", + "type": "ingress" + }, + "BUFFER_PROFILE|alpha_profile": { + "dynamic_th": "0", + "pool": "ingress_lossless_pool", + "headroom_type": "dynamic" + }, + "BUFFER_PROFILE|headroom_profile": { + "dynamic_th": "0", + "pool": "ingress_lossless_pool", + "xon": "18432", + "xoff": "32768", + "size": "51200" + }, + "BUFFER_POOL|egress_lossless_pool": { + "mode": "dynamic", + "size": "13945824", + "type": "egress" + }, + "BUFFER_PROFILE|egress_lossless_profile": { + "dynamic_th": "0", + "pool": "egress_lossless_pool", + "size": "0" + }, + "BUFFER_POOL|egress_lossy_pool": { + "mode": "dynamic", + "type": "egress" + }, + "BUFFER_PROFILE|egress_lossy_profile": { + "dynamic_th": "0", + "pool": "egress_lossy_pool", + "size": "0" } } diff --git a/tests/mock_tables/asic1/counters_db.json b/tests/mock_tables/asic1/counters_db.json index f919742157..1455f069c0 100644 --- a/tests/mock_tables/asic1/counters_db.json +++ b/tests/mock_tables/asic1/counters_db.json @@ -309,6 +309,111 @@ "oid:0x100000000b1fe": "14", "oid:0x100000000b1ff" : "15" }, + "COUNTERS_BUFFER_POOL_NAME_MAP": { + "ingress_lossless_pool": "oid:0x18000000000c10" + }, + "COUNTERS_QUEUE_PORT_MAP": { + "oid:0x100000000b100": "oid:0x1000000000b06", + "oid:0x100000000b101": "oid:0x1000000000b06", + "oid:0x100000000b102": "oid:0x1000000000b06", + "oid:0x100000000b103": "oid:0x1000000000b06", + "oid:0x100000000b104": "oid:0x1000000000b06", + "oid:0x100000000b105": "oid:0x1000000000b06", + "oid:0x100000000b106": "oid:0x1000000000b06", + 
"oid:0x100000000b107": "oid:0x1000000000b06", + "oid:0x100000000b108": "oid:0x1000000000b06", + "oid:0x100000000b109": "oid:0x1000000000b06", + "oid:0x100000000b110": "oid:0x1000000000b06", + "oid:0x100000000b111": "oid:0x1000000000b06", + "oid:0x100000000b112": "oid:0x1000000000b06", + "oid:0x100000000b113": "oid:0x1000000000b06", + "oid:0x100000000b114": "oid:0x1000000000b06", + "oid:0x100000000b115": "oid:0x1000000000b06", + "oid:0x100000000b200": "oid:0x1000000000b08", + "oid:0x100000000b201": "oid:0x1000000000b08", + "oid:0x100000000b202": "oid:0x1000000000b08", + "oid:0x100000000b203": "oid:0x1000000000b08", + "oid:0x100000000b204": "oid:0x1000000000b08", + "oid:0x100000000b205": "oid:0x1000000000b08", + "oid:0x100000000b206": "oid:0x1000000000b08", + "oid:0x100000000b207": "oid:0x1000000000b08", + "oid:0x100000000b208": "oid:0x1000000000b08", + "oid:0x100000000b209": "oid:0x1000000000b08", + "oid:0x100000000b210": "oid:0x1000000000b08", + "oid:0x100000000b211": "oid:0x1000000000b08", + "oid:0x100000000b212": "oid:0x1000000000b08", + "oid:0x100000000b213": "oid:0x1000000000b08", + "oid:0x100000000b214": "oid:0x1000000000b08", + "oid:0x100000000b215": "oid:0x1000000000b08" + }, + "COUNTERS_QUEUE_TYPE_MAP": { + "oid:0x100000000b100": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b101": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b102": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b103": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b104": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b105": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b106": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b107": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b108": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b109": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b110": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b111": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b112": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b113": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b114": 
"SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b115": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b200": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b201": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b202": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b203": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b204": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b205": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b206": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b207": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b208": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b209": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b210": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b211": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b212": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b213": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b214": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b215": "SAI_QUEUE_TYPE_ALL" + }, + "COUNTERS_QUEUE_INDEX_MAP": { + "oid:0x100000000b100": "0", + "oid:0x100000000b101": "1", + "oid:0x100000000b102": "2", + "oid:0x100000000b103": "3", + "oid:0x100000000b104": "4", + "oid:0x100000000b105": "5", + "oid:0x100000000b106": "6", + "oid:0x100000000b107": "7", + "oid:0x100000000b108": "8", + "oid:0x100000000b109": "9", + "oid:0x100000000b110": "10", + "oid:0x100000000b111": "11", + "oid:0x100000000b112": "12", + "oid:0x100000000b113": "13", + "oid:0x100000000b114": "14", + "oid:0x100000000b115": "15", + "oid:0x100000000b200": "0", + "oid:0x100000000b201": "1", + "oid:0x100000000b202": "2", + "oid:0x100000000b203": "3", + "oid:0x100000000b204": "4", + "oid:0x100000000b205": "5", + "oid:0x100000000b206": "6", + "oid:0x100000000b207": "7", + "oid:0x100000000b208": "8", + "oid:0x100000000b209": "9", + "oid:0x100000000b210": "10", + "oid:0x100000000b211": "11", + "oid:0x100000000b212": "12", + "oid:0x100000000b213": "13", + "oid:0x100000000b214": "14", + "oid:0x100000000b215": "15" + }, "COUNTERS_LAG_NAME_MAP": { "PortChannel0001": "oid:0x60000000005a1", "PortChannel0002": "oid:0x60000000005a2", @@ -1262,5 
+1367,313 @@ "COUNTERS:oid:0x1600000000034f":{ "SAI_COUNTER_STAT_PACKETS": 1000, "SAI_COUNTER_STAT_BYTES": 2000 + }, + "USER_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 100, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 100 + }, + "USER_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 101, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 101 + }, + "USER_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 102, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 102 + }, + "USER_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 103, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 103 + }, + "USER_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 104, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 104 + }, + "USER_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 105, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 105 + }, + "USER_WATERMARKS:oid:100000000b0f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 106, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 106 + }, + "USER_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 107, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 107 + }, + "USER_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 108, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 108 + }, + "USER_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 109, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 109 + }, + "USER_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 
110, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 110 + }, + "USER_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 111, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 111 + }, + "USER_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 112, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 112 + }, + "USER_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 113, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 113 + }, + "USER_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 114, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 114 + }, + "USER_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 115, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 115 + }, + "USER_WATERMARKS:oid:0x100000000b1f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 200, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 200 + }, + "USER_WATERMARKS:oid:0x100000000b1f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 201, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 201 + }, + "USER_WATERMARKS:oid:0x100000000b1f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 202, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 202 + }, + "USER_WATERMARKS:oid:0x100000000b1f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 203, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 203 + }, + "USER_WATERMARKS:oid:0x100000000b1f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 204, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 204 + }, + "USER_WATERMARKS:oid:0x100000000b1f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 205, + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 205 + }, + "USER_WATERMARKS:oid:0x100000000b1f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 206, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 206 + }, + "USER_WATERMARKS:oid:0x100000000b1f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 207, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 207 + }, + "USER_WATERMARKS:oid:0x100000000b1f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 208, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 208 + }, + "USER_WATERMARKS:oid:0x100000000b1f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 209, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 209 + }, + "USER_WATERMARKS:oid:0x100000000b1fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 210, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 210 + }, + "USER_WATERMARKS:oid:0x100000000b1fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 211, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 211 + }, + "USER_WATERMARKS:oid:0x100000000b1fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 212, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 212 + }, + "USER_WATERMARKS:oid:0x100000000b1fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 213, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 213 + }, + "USER_WATERMARKS:oid:0x100000000b1fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 214, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 214 + }, + "USER_WATERMARKS:oid:0x100000000b1ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 215, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 215 + }, + "USER_WATERMARKS:oid:0x18000000000c10": { + "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES": "3000", + 
"SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES": "432640" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "200", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "200" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "201", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "201" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "202", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "202" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "203", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "203" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "204", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "204" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "205", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "205" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "206", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "206" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "207", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "207" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "500", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "500" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "501", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "501" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fa": { + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "502", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "502" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "503", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "503" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "504", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "504" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "505", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "505" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "506", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "506" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "507", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "507" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "200", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "200" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "201", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "201" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "202", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "202" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "203", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "203" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "204", + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "204" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "205", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "205" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "206", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "206" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "207", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "207" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "500", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "500" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "501", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "501" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "502", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "502" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "503", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "503" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "504", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "504" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "505", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "505" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "506", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "506" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1ff": { 
+ "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "507", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "507" + }, + "USER_WATERMARKS:oid:0x100000000b100": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2057328" + }, + "USER_WATERMARKS:oid:0x100000000b101": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "20" + }, + "USER_WATERMARKS:oid:0x100000000b102": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b103": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" + }, + "USER_WATERMARKS:oid:0x100000000b104": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" + }, + "USER_WATERMARKS:oid:0x100000000b105": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b106": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b107": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "28" + }, + "USER_WATERMARKS:oid:0x100000000b108": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b109": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" + }, + "USER_WATERMARKS:oid:0x100000000b110": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "5" + }, + "USER_WATERMARKS:oid:0x100000000b111": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2057328" + }, + "USER_WATERMARKS:oid:0x100000000b112": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "208" + }, + "USER_WATERMARKS:oid:0x100000000b113": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "20" + }, + "USER_WATERMARKS:oid:0x100000000b114": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "228" + }, + "USER_WATERMARKS:oid:0x100000000b115": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" } } diff --git a/tests/mock_tables/asic1/database_config.json b/tests/mock_tables/asic1/database_config.json index d3028b0b45..5fca7834f6 100644 --- a/tests/mock_tables/asic1/database_config.json +++ b/tests/mock_tables/asic1/database_config.json @@ -4,6 +4,11 @@ "hostname" : "127.0.0.1", "port" : 6379, "unix_socket_path" 
: "/var/run/redis/redis.sock" + }, + "redis_bmp": { + "hostname" : "127.0.0.1", + "port" : 6400, + "unix_socket_path" : "/var/run/redis/redis_bmp.sock" } }, "DATABASES" : { @@ -51,6 +56,11 @@ "id" : 7, "separator": "|", "instance" : "redis" + }, + "BMP_STATE_DB" : { + "id" : 20, + "separator": "|", + "instance" : "redis_bmp" } }, "VERSION" : "1.1" diff --git a/tests/mock_tables/asic1/ip_route_lc.json b/tests/mock_tables/asic1/ip_route_lc.json new file mode 100644 index 0000000000..e6c0063f90 --- /dev/null +++ b/tests/mock_tables/asic1/ip_route_lc.json @@ -0,0 +1,106 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 200, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 13, + "internalNextHopNum": 8, + "internalNextHopActiveNum": 8, + "nexthopGroupId": 465, + "installedNexthopGroupId": 465, + "uptime": "04w0d12h", + "nexthops": [ + { + "flags": 5, + "ip": "20.1.0.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.8.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.16.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + 
"flags": 5, + "ip": "20.1.24.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic1/ip_route_lc_2.json b/tests/mock_tables/asic1/ip_route_lc_2.json new file mode 100644 index 0000000000..f7dff5d51b --- /dev/null +++ b/tests/mock_tables/asic1/ip_route_lc_2.json @@ -0,0 +1,56 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 3, + "internalNextHopActiveNum": 3, + "nexthopGroupId": 2173, + "installedNexthopGroupId": 2173, + "uptime": "01:01:57", + "nexthops": [ + { + "flags": 5, + "ip": "10.0.0.1", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "10.0.0.1", + "afi": "ipv4", + "interfaceIndex": 52, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "10.0.0.7", + "afi": "ipv4", + "interfaceIndex": 29, + "interfaceName": "PortChannel106", + "active": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic1/ip_route_remote_lc.json b/tests/mock_tables/asic1/ip_route_remote_lc.json new file mode 100644 index 0000000000..e6c0063f90 --- /dev/null +++ b/tests/mock_tables/asic1/ip_route_remote_lc.json @@ -0,0 +1,106 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 
200, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 13, + "internalNextHopNum": 8, + "internalNextHopActiveNum": 8, + "nexthopGroupId": 465, + "installedNexthopGroupId": 465, + "uptime": "04w0d12h", + "nexthops": [ + { + "flags": 5, + "ip": "20.1.0.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.8.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.16.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.24.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic2/database_config.json b/tests/mock_tables/asic2/database_config.json index d3028b0b45..5fca7834f6 100644 --- a/tests/mock_tables/asic2/database_config.json +++ b/tests/mock_tables/asic2/database_config.json @@ -4,6 +4,11 @@ "hostname" : "127.0.0.1", "port" : 6379, "unix_socket_path" : "/var/run/redis/redis.sock" + }, + "redis_bmp": { + "hostname" : "127.0.0.1", + 
"port" : 6400, + "unix_socket_path" : "/var/run/redis/redis_bmp.sock" } }, "DATABASES" : { @@ -51,6 +56,11 @@ "id" : 7, "separator": "|", "instance" : "redis" + }, + "BMP_STATE_DB" : { + "id" : 20, + "separator": "|", + "instance" : "redis_bmp" } }, "VERSION" : "1.1" diff --git a/tests/mock_tables/chassis_state_db.json b/tests/mock_tables/chassis_state_db.json index 6af9e19da4..365cbf80cd 100644 --- a/tests/mock_tables/chassis_state_db.json +++ b/tests/mock_tables/chassis_state_db.json @@ -7,6 +7,62 @@ }, "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { "module_hostname": "sonic-lc3" + }, + "LINECARD_PORT_STAT_MARK_TABLE|sonic-lc1": { + "timestamp": "2020-07-01 00:00:00" + }, + "LINECARD_PORT_STAT_MARK_TABLE|sonic-lc3": { + "timestamp": "2020-07-01 00:00:00" + }, + "LINECARD_PORT_STAT_TABLE|Ethernet1/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet2/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet11/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 } - } \ No newline at end of file diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 108fa7593d..187efed553 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -1785,6 +1785,10 @@ "POLL_INTERVAL": "10000", "FLEX_COUNTER_STATUS": "enable" }, + "FLEX_COUNTER_TABLE|ENI": { + "POLL_INTERVAL": "1000", + 
"FLEX_COUNTER_STATUS": "enable" + }, "PFC_WD|Ethernet0": { "action": "drop", "detection_time": "600", diff --git a/tests/mock_tables/counters_db.json b/tests/mock_tables/counters_db.json index 2f16c7014d..9e553c2901 100644 --- a/tests/mock_tables/counters_db.json +++ b/tests/mock_tables/counters_db.json @@ -882,7 +882,23 @@ "SAI_PORT_STAT_ETHER_STATS_JABBERS": "0", "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "130402", "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "3", - "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "4" + "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "4", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S0": "1000000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S1": "900000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S2": "800000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S3": "700000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S4": "600000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S5": "500000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S6": "400000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S7": "300000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S8": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S9": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S10": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S11": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S12": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S13": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S14": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S15": "0" }, "COUNTERS:oid:0x1000000000013": { "SAI_PORT_STAT_IF_IN_UCAST_PKTS": "4", diff --git a/tests/mock_tables/database_config.json b/tests/mock_tables/database_config.json index f55c0734c2..9d6125fc74 100644 --- a/tests/mock_tables/database_config.json +++ b/tests/mock_tables/database_config.json @@ -4,6 +4,11 @@ "hostname" : "227.0.0.1", "port" : 6379, "unix_socket_path" : "/var/run/redis/redis.sock" + }, + "redis_bmp": { + "hostname" : "127.0.0.1", + "port" : 6400, + "unix_socket_path" : "/var/run/redis/redis_bmp.sock" } }, 
"DATABASES" : { @@ -61,6 +66,11 @@ "id" : 13, "separator": "|", "instance" : "redis" + }, + "BMP_STATE_DB" : { + "id" : 20, + "separator": "|", + "instance" : "redis_bmp" } }, "VERSION" : "1.1" diff --git a/tests/mock_tables/dbconnector.py b/tests/mock_tables/dbconnector.py index 4ccb392368..379c4e75cd 100644 --- a/tests/mock_tables/dbconnector.py +++ b/tests/mock_tables/dbconnector.py @@ -68,6 +68,32 @@ def config_set(self, *args): class MockPubSub: + class MessageList: + """A custom subscriptable class to hold messages in a list-like format""" + def __init__(self, channel): + self._data = [] + self._channel = channel + + def __getitem__(self, index): + return self._data[index] + + def __setitem__(self, index, value): + self._data[index] = value + + def append(self, msg): + print(f"Message published to {self._channel}: ", msg) + self._data.append(msg) + + def __init__(self, namespace): + # Initialize channels required for testing + self.messages = self.MessageList('WATERMARK_CLEAR_REQUEST') + self.channels = {'WATERMARK_CLEAR_REQUEST': self.messages} + self.namespace = namespace + + def __getitem__(self, key): + print("Channel:", key, "accessed in namespace:", self.namespace) + return self.channels[key] + def get_message(self): return None @@ -99,7 +125,7 @@ def __init__(self, *args, **kwargs): db_name = kwargs.pop('db_name') self.decode_responses = kwargs.pop('decode_responses', False) == True fname = db_name.lower() + ".json" - self.pubsub = MockPubSub() + self.pubsub = MockPubSub(namespace) if namespace is not None and namespace is not multi_asic.DEFAULT_NAMESPACE: fname = os.path.join(INPUT_DIR, namespace, fname) diff --git a/tests/multi_asic_ecnconfig_test.py b/tests/multi_asic_ecnconfig_test.py new file mode 100644 index 0000000000..034a517ace --- /dev/null +++ b/tests/multi_asic_ecnconfig_test.py @@ -0,0 +1,64 @@ +import os +import sys +from .ecn_test import TestEcnConfigBase +from .ecn_input.ecn_test_vectors import testData + +root_path = 
os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(root_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, root_path) +sys.path.insert(0, modules_path) + + +class TestEcnConfigMultiAsic(TestEcnConfigBase): + @classmethod + def setup_class(cls): + super().setup_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + + def test_ecn_show_config_all_masic(self): + self.executor(testData['ecn_show_config_masic']) + + def test_ecn_show_config_all_verbose_masic(self): + self.executor(testData['test_ecn_show_config_verbose_masic']) + + def test_ecn_show_config_one_masic(self): + self.executor(testData['test_ecn_show_config_namespace']) + + def test_ecn_show_config_one_verbose_masic(self): + self.executor(testData['test_ecn_show_config_namespace_verbose']) + + def test_ecn_config_change_other_threshold_masic(self): + self.executor(testData['ecn_cfg_threshold_masic']) + + def test_ecn_config_change_other_prob_masic(self): + self.executor(testData['ecn_cfg_probability_masic']) + + def test_ecn_config_change_gdrop_verbose_all_masic(self): + self.executor(testData['ecn_cfg_gdrop_verbose_all_masic']) + + def test_ecn_config_multi_set_verbose_all_masic(self): + self.executor(testData['ecn_cfg_multi_set_verbose_all_masic']) + + def test_ecn_queue_get_masic(self): + self.executor(testData['ecn_q_get_masic']) + + def test_ecn_queue_get_verbose_masic(self): + self.executor(testData['ecn_q_get_verbose_masic']) + + def test_ecn_queue_get_all_masic(self): + self.executor(testData['ecn_q_get_all_ns_masic']) + + def test_ecn_queue_get_all_verbose_masic(self): + self.executor(testData['ecn_q_get_all_ns_verbose_masic']) + + def test_ecn_q_set_off_all_masic(self): + self.executor(testData['ecn_cfg_q_all_ns_off_masic']) + + def test_ecn_q_set_off_one_masic(self): + self.executor(testData['ecn_cfg_q_one_ns_off_verbose_masic']) + + @classmethod + def teardown_class(cls): + super().teardown_class() + 
os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" diff --git a/tests/multi_asic_mmuconfig_test.py b/tests/multi_asic_mmuconfig_test.py new file mode 100644 index 0000000000..1590d3f38f --- /dev/null +++ b/tests/multi_asic_mmuconfig_test.py @@ -0,0 +1,49 @@ +import os +import sys +from .mmuconfig_test import TestMmuConfigBase +from .mmuconfig_input.mmuconfig_test_vectors import testData + +root_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(root_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, root_path) +sys.path.insert(0, modules_path) + + +class TestMmuConfigMultiAsic(TestMmuConfigBase): + @classmethod + def setup_class(cls): + super().setup_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + + def test_mmu_show_config_one_masic(self): + self.executor(testData['mmu_cfg_list_one_masic']) + + def test_mmu_show_config_one_verbose_masic(self): + self.executor(testData['mmu_cfg_list_one_verbose_masic']) + + def test_mmu_show_config_all_masic(self): + self.executor(testData['mmu_cfg_list_all_masic']) + + def test_mmu_alpha_config_one_masic(self): + self.executor(testData['mmu_cfg_alpha_one_masic']) + + def test_mmu_alpha_config_all_verbose_masic(self): + self.executor(testData['mmu_cfg_alpha_all_verbose_masic']) + + def test_mmu_staticth_config_one_masic(self): + self.executor(testData['mmu_cfg_static_th_one_masic']) + + def test_mmu_staticth_config_all_verbose_masic(self): + self.executor(testData['mmu_cfg_static_th_all_verbose_masic']) + + def test_mmu_alpha_config_invalid_masic(self): + self.executor(testData['mmu_cfg_alpha_invalid_masic']) + + def test_mmu_staticth_config_invalid_masic(self): + self.executor(testData['mmu_cfg_static_th_invalid_masic']) + + @classmethod + def teardown_class(cls): + super().teardown_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" diff --git a/tests/multi_asic_pfc_test.py b/tests/multi_asic_pfc_test.py new file mode 100644 index 
0000000000..52bfcf4982 --- /dev/null +++ b/tests/multi_asic_pfc_test.py @@ -0,0 +1,133 @@ +import os +import sys +import json +import importlib +import pfc.main as pfc +from .pfc_test import TestPfcBase +from click.testing import CliRunner +from .pfc_input.pfc_test_vectors import testData + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "pfc") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + + +class TestPfcMultiAsic(TestPfcBase): + @classmethod + def setup_class(cls): + super().setup_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + + # Multi-asic utils rely on the database that is loaded + # We reload the multi_asic database and update the multi-asic utils + # Pfc uses click cmds that use multi_asic utils, hence we reload pfc too + + import mock_tables.mock_multi_asic + importlib.reload(mock_tables.mock_multi_asic) + mock_tables.dbconnector.load_namespace_config() + + import utilities_common + importlib.reload(utilities_common.multi_asic) + importlib.reload(pfc) + + def executor(self, input): + runner = CliRunner() + result = runner.invoke(pfc.cli, input['cmd']) + exit_code = result.exit_code + output = result.output + + print(exit_code) + print(output) + + assert exit_code == input['rc'] + + # For config commands we dump modified value in a tmp JSON file for testing + if 'cmp_args' in input: + fd = open('/tmp/pfc_testdata.json', 'r') + cmp_data = json.load(fd) + + # Verify assignments + for args in input['cmp_args']: + namespace, table, key, field, expected_val = args + assert(cmp_data[namespace][table][key][field] == expected_val) + fd.close() + + if 'rc_msg' in input: + assert input['rc_msg'] in output + + if 'rc_output' in input: + assert output == input['rc_output'] + + def test_pfc_show_asymmetric_all_asic0_masic(self): + self.executor(testData['pfc_show_asymmetric_all_asic0_masic']) + + def 
test_pfc_show_asymmetric_all_asic1_masic(self): + self.executor(testData['pfc_show_asymmetric_all_asic1_masic']) + + def test_pfc_show_asymmetric_all_masic(self): + self.executor(testData['pfc_show_asymmetric_all_masic']) + + def test_pfc_show_asymmetric_intf_one_masic(self): + self.executor(testData['pfc_show_asymmetric_intf_one_masic']) + + def test_pfc_show_asymmetric_intf_all_masic(self): + self.executor(testData['pfc_show_asymmetric_intf_all_masic']) + + def test_pfc_show_asymmetric_intf_fake_one_masic(self): + self.executor(testData['pfc_show_asymmetric_intf_fake_one_masic']) + + def test_pfc_show_priority_all_asic0_masic(self): + self.executor(testData['pfc_show_priority_all_asic0_masic']) + + def test_pfc_show_priority_all_asic1_masic(self): + self.executor(testData['pfc_show_priority_all_asic1_masic']) + + def test_pfc_show_priority_all_masic(self): + self.executor(testData['pfc_show_priority_all_masic']) + + def test_pfc_show_priority_intf_one_masic(self): + self.executor(testData['pfc_show_priority_intf_one_masic']) + + def test_pfc_show_priority_intf_all_masic(self): + self.executor(testData['pfc_show_priority_intf_all_masic']) + + def test_pfc_show_priority_intf_fake_one_masic(self): + self.executor(testData['pfc_show_priority_intf_fake_one_masic']) + + def test_pfc_show_priority_intf_fake_all_masic(self): + self.executor(testData['pfc_show_priority_intf_fake_all_masic']) + + def test_pfc_config_asymmetric_one_masic(self): + self.executor(testData['pfc_config_asymmetric_one_masic']) + + def test_pfc_config_asymmetric_invalid_one_masic(self): + self.executor(testData['pfc_config_asymmetric_invalid_one_masic']) + + def test_pfc_config_asymmetric_all_masic(self): + self.executor(testData['pfc_config_asymmetric_all_masic']) + + def test_pfc_config_asymmetric_invalid_all_masic(self): + self.executor(testData['pfc_config_asymmetric_invalid_all_masic']) + + def test_pfc_config_priority_one_masic(self): + 
self.executor(testData['pfc_config_priority_one_masic']) + + def test_pfc_config_priority_invalid_one_masic(self): + self.executor(testData['pfc_config_priority_invalid_one_masic']) + + def test_pfc_config_priority_all_masic(self): + self.executor(testData['pfc_config_priority_all_masic']) + + def test_pfc_config_priority_invalid_all_masic(self): + self.executor(testData['pfc_config_priority_invalid_all_masic']) + + @classmethod + def teardown_class(cls): + # Reset the database to mock single-asic state + import mock_tables.mock_single_asic + mock_tables.dbconnector.load_database_config() + + super().teardown_class() + os.environ.pop("UTILITIES_UNIT_TESTING_TOPOLOGY") diff --git a/tests/multi_asic_pgdropstat_test.py b/tests/multi_asic_pgdropstat_test.py index 94bb13011b..2a5e97cfdb 100644 --- a/tests/multi_asic_pgdropstat_test.py +++ b/tests/multi_asic_pgdropstat_test.py @@ -27,18 +27,18 @@ PG14 PG15 -------------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ------ ------ ------ ------\ ------ ------ - Ethernet0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ - 0 0 - Ethernet4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ - 0 0 + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A Ethernet-BP0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ 0 0 Ethernet-BP4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ 0 0 -Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ - N/A N/A -Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ - N/A N/A +Ethernet-BP256 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 +Ethernet-BP260 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 """ diff --git a/tests/multi_asic_queue_counter_test.py b/tests/multi_asic_queue_counter_test.py index fe8b057b5d..af57fa75e5 100644 --- a/tests/multi_asic_queue_counter_test.py +++ b/tests/multi_asic_queue_counter_test.py @@ -22,6 +22,7 @@ show_queue_counters = """\ +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes 
--------- ----- -------------- --------------- ----------- ------------ Ethernet0 UC0 68 30 56 74 @@ -41,6 +42,7 @@ Ethernet0 MC14 82 44 42 60 Ethernet0 MC15 83 45 41 59 +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet4 UC0 84 46 40 58 @@ -60,6 +62,7 @@ Ethernet4 MC14 98 60 26 44 Ethernet4 MC15 99 61 25 43 +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes ------------ ----- -------------- --------------- ----------- ------------ Ethernet-BP0 UC0 100 62 24 42 @@ -79,6 +82,7 @@ Ethernet-BP0 MC14 114 76 10 28 Ethernet-BP0 MC15 115 77 9 27 +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes ------------ ----- -------------- --------------- ----------- ------------ Ethernet-BP4 UC0 116 78 8 26 @@ -89,19 +93,142 @@ Ethernet-BP4 UC5 121 83 3 21 Ethernet-BP4 UC6 122 84 2 20 Ethernet-BP4 UC7 123 85 1 19 -Ethernet-BP4 MC8 124 86 0 18 -Ethernet-BP4 MC9 125 87 1 17 -Ethernet-BP4 MC10 126 88 2 16 -Ethernet-BP4 MC11 127 89 3 15 -Ethernet-BP4 MC12 128 90 4 14 -Ethernet-BP4 MC13 129 91 5 13 -Ethernet-BP4 MC14 130 92 6 12 -Ethernet-BP4 MC15 131 93 7 11 +Ethernet-BP4 ALL8 124 86 0 18 +Ethernet-BP4 ALL9 125 87 1 17 +Ethernet-BP4 ALL10 126 88 2 16 +Ethernet-BP4 ALL11 127 89 3 15 +Ethernet-BP4 ALL12 128 90 4 14 +Ethernet-BP4 ALL13 129 91 5 13 +Ethernet-BP4 ALL14 130 92 6 12 +Ethernet-BP4 ALL15 131 93 7 11 """ +show_queue_counters_all_asics = """\ +For namespace asic0: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +--------- ----- -------------- --------------- ----------- ------------ +Ethernet0 UC0 68 30 56 74 +Ethernet0 UC1 69 31 55 73 +Ethernet0 UC2 70 32 54 72 +Ethernet0 UC3 71 33 53 71 +Ethernet0 UC4 72 34 52 70 +Ethernet0 UC5 73 35 51 69 +Ethernet0 UC6 74 36 50 68 +Ethernet0 UC7 75 37 49 67 +Ethernet0 MC8 76 38 48 66 +Ethernet0 MC9 77 39 47 65 +Ethernet0 MC10 78 40 46 64 +Ethernet0 MC11 79 41 45 63 
+Ethernet0 MC12 80 42 44 62 +Ethernet0 MC13 81 43 43 61 +Ethernet0 MC14 82 44 42 60 +Ethernet0 MC15 83 45 41 59 + +For namespace asic0: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +--------- ----- -------------- --------------- ----------- ------------ +Ethernet4 UC0 84 46 40 58 +Ethernet4 UC1 85 47 39 57 +Ethernet4 UC2 86 48 38 56 +Ethernet4 UC3 87 49 37 55 +Ethernet4 UC4 88 50 36 54 +Ethernet4 UC5 89 51 35 53 +Ethernet4 UC6 90 52 34 52 +Ethernet4 UC7 91 53 33 51 +Ethernet4 MC8 92 54 32 50 +Ethernet4 MC9 93 55 31 49 +Ethernet4 MC10 94 56 30 48 +Ethernet4 MC11 95 57 29 47 +Ethernet4 MC12 96 58 28 46 +Ethernet4 MC13 97 59 27 45 +Ethernet4 MC14 98 60 26 44 +Ethernet4 MC15 99 61 25 43 + +For namespace asic0: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +------------ ----- -------------- --------------- ----------- ------------ +Ethernet-BP0 UC0 100 62 24 42 +Ethernet-BP0 UC1 101 63 23 41 +Ethernet-BP0 UC2 102 64 22 40 +Ethernet-BP0 UC3 103 65 21 39 +Ethernet-BP0 UC4 104 66 20 38 +Ethernet-BP0 UC5 105 67 19 37 +Ethernet-BP0 UC6 106 68 18 36 +Ethernet-BP0 UC7 107 69 17 35 +Ethernet-BP0 MC8 108 70 16 34 +Ethernet-BP0 MC9 109 71 15 33 +Ethernet-BP0 MC10 110 72 14 32 +Ethernet-BP0 MC11 111 73 13 31 +Ethernet-BP0 MC12 112 74 12 30 +Ethernet-BP0 MC13 113 75 11 29 +Ethernet-BP0 MC14 114 76 10 28 +Ethernet-BP0 MC15 115 77 9 27 + +For namespace asic0: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +------------ ----- -------------- --------------- ----------- ------------ +Ethernet-BP4 UC0 116 78 8 26 +Ethernet-BP4 UC1 117 79 7 25 +Ethernet-BP4 UC2 118 80 6 24 +Ethernet-BP4 UC3 119 81 5 23 +Ethernet-BP4 UC4 120 82 4 22 +Ethernet-BP4 UC5 121 83 3 21 +Ethernet-BP4 UC6 122 84 2 20 +Ethernet-BP4 UC7 123 85 1 19 +Ethernet-BP4 ALL8 124 86 0 18 +Ethernet-BP4 ALL9 125 87 1 17 +Ethernet-BP4 ALL10 126 88 2 16 +Ethernet-BP4 ALL11 127 89 3 15 +Ethernet-BP4 ALL12 128 90 4 14 +Ethernet-BP4 ALL13 129 91 5 13 +Ethernet-BP4 ALL14 130 92 6 12 
+Ethernet-BP4 ALL15 131 93 7 11 + +For namespace asic1: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +-------------- ----- -------------- --------------- ----------- ------------ +Ethernet-BP256 UC0 N/A N/A N/A N/A +Ethernet-BP256 UC1 N/A N/A N/A N/A +Ethernet-BP256 UC2 N/A N/A N/A N/A +Ethernet-BP256 UC3 N/A N/A N/A N/A +Ethernet-BP256 UC4 N/A N/A N/A N/A +Ethernet-BP256 UC5 N/A N/A N/A N/A +Ethernet-BP256 UC6 N/A N/A N/A N/A +Ethernet-BP256 UC7 N/A N/A N/A N/A +Ethernet-BP256 MC8 N/A N/A N/A N/A +Ethernet-BP256 MC9 N/A N/A N/A N/A +Ethernet-BP256 MC10 N/A N/A N/A N/A +Ethernet-BP256 MC11 N/A N/A N/A N/A +Ethernet-BP256 MC12 N/A N/A N/A N/A +Ethernet-BP256 MC13 N/A N/A N/A N/A +Ethernet-BP256 MC14 N/A N/A N/A N/A +Ethernet-BP256 MC15 N/A N/A N/A N/A + +For namespace asic1: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +-------------- ----- -------------- --------------- ----------- ------------ +Ethernet-BP260 UC0 N/A N/A N/A N/A +Ethernet-BP260 UC1 N/A N/A N/A N/A +Ethernet-BP260 UC2 N/A N/A N/A N/A +Ethernet-BP260 UC3 N/A N/A N/A N/A +Ethernet-BP260 UC4 N/A N/A N/A N/A +Ethernet-BP260 UC5 N/A N/A N/A N/A +Ethernet-BP260 UC6 N/A N/A N/A N/A +Ethernet-BP260 UC7 N/A N/A N/A N/A +Ethernet-BP260 ALL8 N/A N/A N/A N/A +Ethernet-BP260 ALL9 N/A N/A N/A N/A +Ethernet-BP260 ALL10 N/A N/A N/A N/A +Ethernet-BP260 ALL11 N/A N/A N/A N/A +Ethernet-BP260 ALL12 N/A N/A N/A N/A +Ethernet-BP260 ALL13 N/A N/A N/A N/A +Ethernet-BP260 ALL14 N/A N/A N/A N/A +Ethernet-BP260 ALL15 N/A N/A N/A N/A + +""" show_queue_counters_port = """\ +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes ------------ ----- -------------- --------------- ----------- ------------ Ethernet-BP4 UC0 116 78 8 26 @@ -112,14 +239,14 @@ Ethernet-BP4 UC5 121 83 3 21 Ethernet-BP4 UC6 122 84 2 20 Ethernet-BP4 UC7 123 85 1 19 -Ethernet-BP4 MC8 124 86 0 18 -Ethernet-BP4 MC9 125 87 1 17 -Ethernet-BP4 MC10 126 88 2 16 -Ethernet-BP4 MC11 127 89 3 15 -Ethernet-BP4 MC12 
128 90 4 14 -Ethernet-BP4 MC13 129 91 5 13 -Ethernet-BP4 MC14 130 92 6 12 -Ethernet-BP4 MC15 131 93 7 11 +Ethernet-BP4 ALL8 124 86 0 18 +Ethernet-BP4 ALL9 125 87 1 17 +Ethernet-BP4 ALL10 126 88 2 16 +Ethernet-BP4 ALL11 127 89 3 15 +Ethernet-BP4 ALL12 128 90 4 14 +Ethernet-BP4 ALL13 129 91 5 13 +Ethernet-BP4 ALL14 130 92 6 12 +Ethernet-BP4 ALL15 131 93 7 11 """ @@ -143,6 +270,12 @@ def test_queue_counters_port(self): print(result) assert result == show_queue_counters_port + def test_queue_counters_all_masic(self): + return_code, result = get_result_and_return_code(['queuestat']) + assert return_code == 0 + print(result) + assert result == show_queue_counters_all_asics + @classmethod def teardown_class(cls): os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) diff --git a/tests/multi_asic_watermarkstat_test.py b/tests/multi_asic_watermarkstat_test.py new file mode 100644 index 0000000000..b3bc011011 --- /dev/null +++ b/tests/multi_asic_watermarkstat_test.py @@ -0,0 +1,145 @@ +import os +import sys +from .wm_input.wm_test_vectors import testData +from .utils import get_result_and_return_code +from click.testing import CliRunner +import show.main as show + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + + +class TestWatermarkstatMultiAsic(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + print("SETUP") + + def executor(self, testcase): + runner = CliRunner() + for input in testcase: + if 'clear' in input['cmd']: + exec_cmd = input['cmd'][1:] + print(exec_cmd) + exit_code, output = get_result_and_return_code(exec_cmd) + else: + if len(input['cmd']) == 3: + exec_cmd = 
show.cli.commands[input['cmd'][0]].commands[input['cmd'][1]].commands[input['cmd'][2]] + else: + exec_cmd = show.cli.commands[input['cmd'][0]].commands[input['cmd'][1]] + args = [] if 'args' not in input else input['args'] + result = runner.invoke(exec_cmd, args) + exit_code = result.exit_code + output = result.output + + print(exit_code) + print(output) + + expected_code = 0 if 'rc' not in input else input['rc'] + assert exit_code == expected_code + assert output == input['rc_output'] + + def test_show_pg_shared_one_masic(self): + self.executor(testData['show_pg_wm_shared_one_masic']) + + def test_show_pg_shared_all_masic(self): + self.executor(testData['show_pg_wm_shared_all_masic']) + + def test_show_pg_headroom_wm_one_masic(self): + self.executor(testData['show_pg_wm_hdrm_one_masic']) + + def test_show_pg_headroom_wm_all_masic(self): + self.executor(testData['show_pg_wm_hdrm_all_masic']) + + def test_show_pg_shared_pwm_one_masic(self): + self.executor(testData['show_pg_pwm_shared_one_masic']) + + def test_show_pg_shared_pwm_all_masic(self): + self.executor(testData['show_pg_pwm_shared_all_masic']) + + def test_show_pg_headroom_pwm_one_masic(self): + self.executor(testData['show_pg_pwm_hdrm_one_masic']) + + def test_show_pg_headroom_pwm_all_masic(self): + self.executor(testData['show_pg_pwm_hdrm_all_masic']) + + def test_show_queue_unicast_wm_one_masic(self): + self.executor(testData['show_q_wm_unicast_one_masic']) + + def test_show_queue_unicast_wm_all_masic(self): + self.executor(testData['show_q_wm_unicast_all_masic']) + + def test_show_queue_unicast_pwm_one_masic(self): + self.executor(testData['show_q_pwm_unicast_one_masic']) + + def test_show_queue_unicast_pwm_all_masic(self): + self.executor(testData['show_q_pwm_unicast_all_masic']) + + def test_show_queue_multicast_wm_one_masic(self): + self.executor(testData['show_q_wm_multicast_one_masic']) + + def test_show_queue_multicast_wm_all_masic(self): + self.executor(testData['show_q_wm_multicast_all_masic']) 
+ + def test_show_queue_multicast_pwm_one_masic(self): + self.executor(testData['show_q_pwm_multicast_one_masic']) + + def test_show_queue_multicast_pwm_all_masic(self): + self.executor(testData['show_q_pwm_multicast_all_masic']) + + def test_show_queue_all_wm_one_masic(self): + self.executor(testData['show_q_wm_all_one_masic']) + + def test_show_queue_all_wm_all_masic(self): + self.executor(testData['show_q_wm_all_all_masic']) + + def test_show_queue_all_pwm_one_masic(self): + self.executor(testData['show_q_pwm_all_one_masic']) + + def test_show_queue_all_pwm_all_masic(self): + self.executor(testData['show_q_pwm_all_all_masic']) + + def test_show_buffer_pool_wm_one_masic(self): + self.executor(testData['show_buffer_pool_wm_one_masic']) + + def test_show_buffer_pool_wm_all_masic(self): + self.executor(testData['show_buffer_pool_wm_all_masic']) + + def test_show_buffer_pool_pwm_one_masic(self): + self.executor(testData['show_buffer_pool_pwm_one_masic']) + + def test_show_buffer_pool_pwm_all_masic(self): + self.executor(testData['show_buffer_pool_pwm_all_masic']) + + def test_show_headroom_pool_wm_one_masic(self): + self.executor(testData['show_hdrm_pool_wm_one_masic']) + + def test_show_headroom_pool_wm_all_masic(self): + self.executor(testData['show_hdrm_pool_wm_all_masic']) + + def test_show_headroom_pool_pwm_one_masic(self): + self.executor(testData['show_hdrm_pool_pwm_one_masic']) + + def test_show_headroom_pool_pwm_all_masic(self): + self.executor(testData['show_hdrm_pool_pwm_all_masic']) + + def test_show_invalid_namespace_masic(self): + self.executor(testData['show_invalid_namespace_masic']) + + def test_clear_headroom_one_masic(self): + self.executor(testData['clear_hdrm_pool_wm_one_masic']) + + def test_clear_headroom_all_masic(self): + self.executor(testData['clear_hdrm_pool_wm_all_masic']) + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ['UTILITIES_UNIT_TESTING'] = 
"0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + print("TEARDOWN") diff --git a/tests/ntp.conf b/tests/ntp.conf new file mode 100644 index 0000000000..58bf276dce --- /dev/null +++ b/tests/ntp.conf @@ -0,0 +1,37 @@ +############################################################################### +# This file was AUTOMATICALLY GENERATED. DO NOT MODIFY. +# Controlled by ntp-config.service +############################################################################### + +# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help + +# To avoid ntpd from panic and exit if the drift between new time and +# current system time is large. +tinker panic 0 + +driftfile /var/lib/ntpsec/ntp.drift +leapfile /usr/share/zoneinfo/leap-seconds.list + +server 10.1.1.1 iburst +restrict 10.1.1.1 kod limited nomodify noquery + +server 10.22.1.12 iburst +restrict 10.22.1.12 kod limited nomodify noquery + + +interface ignore wildcard + + +interface listen eth0 +interface listen 127.0.0.1 + + +# Access control configuration +# By default, exchange time with everybody, but don't allow configuration. +# NTPsec doesn't establish peer associations, and so nopeer has no effect, and +# has been removed from here +restrict default kod nomodify noquery limited + +# Local users may interrogate the ntp server more closely. 
+restrict 127.0.0.1 +restrict ::1 diff --git a/tests/pfc_input/pfc_test_vectors.py b/tests/pfc_input/pfc_test_vectors.py new file mode 100644 index 0000000000..20d6b59af3 --- /dev/null +++ b/tests/pfc_input/pfc_test_vectors.py @@ -0,0 +1,286 @@ +# Golden outputs +show_asym_all_asic0_masic = """\ +Namespace asic0 +Interface Asymmetric +------------ ------------ +Ethernet0 off +Ethernet4 off +Ethernet16 off +Ethernet-BP0 off +Ethernet-BP4 off + +""" + +show_asym_all_asic1_masic = """\ +Namespace asic1 +Interface Asymmetric +-------------- ------------ +Ethernet64 off +Ethernet-BP256 off +Ethernet-BP260 off + +""" + +show_asym_all_masic = """\ +Namespace asic0 +Interface Asymmetric +------------ ------------ +Ethernet0 off +Ethernet4 off +Ethernet16 off +Ethernet-BP0 off +Ethernet-BP4 off + +Namespace asic1 +Interface Asymmetric +-------------- ------------ +Ethernet64 off +Ethernet-BP256 off +Ethernet-BP260 off + +""" + +show_asym_intf_one_masic = """\ +Namespace asic0 +Interface Asymmetric +----------- ------------ +Ethernet0 off + +""" + +show_asym_intf_all_masic = """\ +Namespace asic0 +Interface Asymmetric +----------- ------------ +Ethernet0 off + +Namespace asic1 +Interface Asymmetric +----------- ------------ + +""" + +show_asym_intf_fake_one_masic = """\ +Namespace asic0 +Interface Asymmetric +----------- ------------ + +""" + +show_prio_all_asic0_masic = """\ +Namespace asic0 +Interface Lossless priorities +-------------- --------------------- +Ethernet0 3,4 +Ethernet4 3,4 +Ethernet8 3,4 +Ethernet-BP0 3,4 +Ethernet-BP4 3,4 +Ethernet-BP256 3,4 +Ethernet-BP260 3,4 + +""" + +show_prio_all_asic1_masic = """\ +Namespace asic1 +Interface Lossless priorities +-------------- --------------------- +Ethernet0 3,4 +Ethernet4 3,4 +Ethernet8 3,4 +Ethernet-BP0 3,4 +Ethernet-BP4 3,4 +Ethernet-BP256 3,4 + +""" + +show_prio_all_masic = """\ +Namespace asic0 +Interface Lossless priorities +-------------- --------------------- +Ethernet0 3,4 +Ethernet4 3,4 +Ethernet8 3,4 
+Ethernet-BP0 3,4 +Ethernet-BP4 3,4 +Ethernet-BP256 3,4 +Ethernet-BP260 3,4 + +Namespace asic1 +Interface Lossless priorities +-------------- --------------------- +Ethernet0 3,4 +Ethernet4 3,4 +Ethernet8 3,4 +Ethernet-BP0 3,4 +Ethernet-BP4 3,4 +Ethernet-BP256 3,4 + +""" + +show_prio_intf_one_masic = """\ +Namespace asic0 +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 + +""" + +show_prio_intf_all_masic = """\ +Namespace asic0 +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 + +Namespace asic1 +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 + +""" + +show_prio_intf_fake_one_masic = """\ +Cannot find interface Ethernet1234 for Namespace asic0 +""" + +show_prio_intf_fake_all_masic = """\ +Cannot find interface Ethernet1234 for Namespace asic0 +Cannot find interface Ethernet1234 for Namespace asic1 +""" + +testData = { + 'pfc_show_asymmetric_all_asic0_masic': {'cmd': ['show', 'asymmetric', + '--namespace', 'asic0'], + 'rc': 0, + 'rc_output': show_asym_all_asic0_masic + }, + 'pfc_show_asymmetric_all_asic1_masic': {'cmd': ['show', 'asymmetric', + '--namespace', 'asic1'], + 'rc': 0, + 'rc_output': show_asym_all_asic1_masic + }, + 'pfc_show_asymmetric_all_masic': {'cmd': ['show', 'asymmetric'], + 'rc': 0, + 'rc_output': show_asym_all_masic + }, + 'pfc_show_asymmetric_intf_one_masic': {'cmd': ['show', 'asymmetric', + 'Ethernet0', '--namespace', + 'asic0'], + 'rc': 0, + 'rc_output': show_asym_intf_one_masic + }, + 'pfc_show_asymmetric_intf_all_masic': {'cmd': ['show', 'asymmetric', + 'Ethernet0'], + 'rc': 0, + 'rc_output': show_asym_intf_all_masic + }, + 'pfc_show_asymmetric_intf_fake_one_masic': {'cmd': ['show', 'asymmetric', + 'Ethernet1234', '--namespace', + 'asic0'], + 'rc': 0, + 'rc_output': show_asym_intf_fake_one_masic + }, + 'pfc_show_priority_all_asic0_masic': {'cmd': ['show', 'priority', + '--namespace', 'asic0'], + 'rc': 0, + 'rc_output': show_prio_all_asic0_masic + 
}, + 'pfc_show_priority_all_asic1_masic': {'cmd': ['show', 'priority', + '--namespace', 'asic1'], + 'rc': 0, + 'rc_output': show_prio_all_asic1_masic + }, + 'pfc_show_priority_all_masic': {'cmd': ['show', 'priority'], + 'rc': 0, + 'rc_output': show_prio_all_masic + }, + 'pfc_show_priority_intf_one_masic': {'cmd': ['show', 'priority', + 'Ethernet0', '--namespace', + 'asic0'], + 'rc': 0, + 'rc_output': show_prio_intf_one_masic + }, + 'pfc_show_priority_intf_all_masic': {'cmd': ['show', 'priority', + 'Ethernet0'], + 'rc': 0, + 'rc_output': show_prio_intf_all_masic + }, + 'pfc_show_priority_intf_fake_one_masic': {'cmd': ['show', 'priority', + 'Ethernet1234', '--namespace', + 'asic0'], + 'rc': 0, + 'rc_output': show_prio_intf_fake_one_masic + }, + 'pfc_show_priority_intf_fake_all_masic': {'cmd': ['show', 'priority', + 'Ethernet1234'], + 'rc': 0, + 'rc_output': show_prio_intf_fake_all_masic + }, + 'pfc_config_asymmetric_one_masic': {'cmd': ['config', 'asymmetric', + 'on', 'Ethernet0', '--namespace', + 'asic0'], + 'rc': 0, + 'cmp_args': [['asic0', 'PORT', 'Ethernet0', 'pfc_asym', 'on']] + }, + 'pfc_config_asymmetric_invalid_one_masic': {'cmd': ['config', 'asymmetric', + 'onn', 'Ethernet0', '--namespace', + 'asic0'], + 'rc': 2, + 'rc_msg': ('Usage: cli config asymmetric [OPTIONS] ' + '[on|off] INTERFACE\nTry "cli config ' + 'asymmetric --help" for help.\n\n' + 'Error: Invalid value for "[on|off]": ' + 'invalid choice: onn. 
(choose from on, off)') + }, + 'pfc_config_asymmetric_all_masic': {'cmd': ['config', 'asymmetric', + 'on', 'Ethernet0'], + 'rc': 0, + 'cmp_args': [['asic0', 'PORT', 'Ethernet0', 'pfc_asym', 'on'], + ['asic1', 'PORT', 'Ethernet0', 'pfc_asym', 'on']] + }, + 'pfc_config_asymmetric_invalid_all_masic': {'cmd': ['config', 'asymmetric', + 'onn', 'Ethernet0'], + 'rc': 2, + 'rc_msg': ('Usage: cli config asymmetric [OPTIONS] ' + '[on|off] INTERFACE\nTry "cli config ' + 'asymmetric --help" for help.\n\n' + 'Error: Invalid value for "[on|off]": ' + 'invalid choice: onn. (choose from on, off)') + }, + 'pfc_config_priority_one_masic': {'cmd': ['config', 'priority', + 'on', 'Ethernet0', '5', + '--namespace', 'asic0'], + 'rc': 0, + 'cmp_args': [['asic0', 'PORT_QOS_MAP', 'Ethernet0', + 'pfc_enable', '3,4,5']] + }, + 'pfc_config_priority_invalid_one_masic': {'cmd': ['config', 'priority', + 'onn', 'Ethernet0', '5', + '--namespace', 'asic0'], + 'rc': 2, + 'rc_msg': ('Usage: cli config priority [OPTIONS] ' + '[on|off] INTERFACE [0|1|2|3|4|5|6|7]\n' + 'Try "cli config priority --help" for ' + 'help.\n\nError: Invalid value for ' + '"[on|off]": invalid choice: onn. ' + '(choose from on, off)') + }, + 'pfc_config_priority_all_masic': {'cmd': ['config', 'priority', + 'on', 'Ethernet0', '5'], + 'rc': 0, + 'cmp_args': [['asic0', 'PORT_QOS_MAP', 'Ethernet0', + 'pfc_enable', '3,4,5'], + ['asic1', 'PORT_QOS_MAP', 'Ethernet0', + 'pfc_enable', '3,4,5']] + }, + 'pfc_config_priority_invalid_all_masic': {'cmd': ['config', 'priority', + 'onn', 'Ethernet0', '5'], + 'rc': 2, + 'rc_msg': ('Usage: cli config priority [OPTIONS] ' + '[on|off] INTERFACE [0|1|2|3|4|5|6|7]\n' + 'Try "cli config priority --help" for ' + 'help.\n\nError: Invalid value for ' + '"[on|off]": invalid choice: onn. 
' + '(choose from on, off)') + }, +} diff --git a/tests/pfc_test.py b/tests/pfc_test.py index 101aa476cc..136dab2623 100644 --- a/tests/pfc_test.py +++ b/tests/pfc_test.py @@ -1,10 +1,10 @@ import os import sys +import json import pfc.main as pfc from .pfc_input.assert_show_output import pfc_cannot_find_intf, pfc_show_asymmetric_all, \ pfc_show_asymmetric_intf, pfc_show_priority_all, pfc_show_priority_intf, \ pfc_config_priority_on, pfc_asym_cannot_find_intf -from utilities_common.db import Db from click.testing import CliRunner from importlib import reload @@ -17,11 +17,15 @@ class TestPfcBase(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" - def executor(self, cliobj, command, expected_rc=0, expected_output=None, expected_cfgdb_entry=None, + def executor(self, cliobj, command, expected_rc=0, expected_output=None, expected_cfgdb_entries=None, runner=CliRunner()): - db = Db() - result = runner.invoke(cliobj, command, obj=db) + result = runner.invoke(cliobj, command) print(result.exit_code) print(result.output) @@ -32,21 +36,37 @@ def executor(self, cliobj, command, expected_rc=0, expected_output=None, expecte if expected_output: assert result.output == expected_output - if expected_cfgdb_entry: - (table, key, field, expected_val) = expected_cfgdb_entry - configdb = db.cfgdb - entry = configdb.get_entry(table, key) - assert entry.get(field) == expected_val + if expected_cfgdb_entries: + fd = open('/tmp/pfc_testdata.json', 'r') + cmp_data = json.load(fd) + for expected_cfgdb_entry in expected_cfgdb_entries: + (namespace, table, key, field, expected_val) = expected_cfgdb_entry + entry = cmp_data[namespace][table][key][field] + assert entry == expected_val + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1] + ) + os.environ.pop("UTILITIES_UNIT_TESTING") + if 
os.path.isfile('/tmp/pfc_testdata.json'): + os.remove('/tmp/pfc_testdata.json') class TestPfc(TestPfcBase): - @classmethod def setup_class(cls): + super().setup_class() + from mock_tables import dbconnector from mock_tables import mock_single_asic reload(mock_single_asic) - dbconnector.load_namespace_config() + dbconnector.load_database_config() + + import utilities_common + reload(utilities_common.multi_asic) + reload(pfc) def test_pfc_show_asymmetric_all(self): self.executor(pfc.cli, ['show', 'asymmetric'], @@ -74,8 +94,10 @@ def test_pfc_show_priority_intf_fake(self): def test_pfc_config_asymmetric(self): self.executor(pfc.cli, ['config', 'asymmetric', 'on', 'Ethernet0'], - expected_cfgdb_entry=('PORT', 'Ethernet0', 'pfc_asym', 'on')) + # namespace, table, key, field, expected_val + expected_cfgdb_entries=[('', 'PORT', 'Ethernet0', 'pfc_asym', 'on')]) def test_pfc_config_priority(self): self.executor(pfc.cli, ['config', 'priority', 'on', 'Ethernet0', '5'], - expected_output=pfc_config_priority_on) + # namespace, table, key, field, expected_val + expected_cfgdb_entries=[('', 'PORT_QOS_MAP', 'Ethernet0', 'pfc_enable', '3,4,5')]) diff --git a/tests/portstat_db/on_sup_na/chassis_state_db.json b/tests/portstat_db/on_sup_na/chassis_state_db.json new file mode 100644 index 0000000000..d2e5771098 --- /dev/null +++ b/tests/portstat_db/on_sup_na/chassis_state_db.json @@ -0,0 +1,68 @@ +{ + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD0": { + "module_hostname": "sonic-lc1" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { + "module_hostname": "sonic-lc2" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { + "module_hostname": "sonic-lc3" + }, + "LINECARD_PORT_STAT_MARK_TABLE|sonic-lc1": { + "timestamp": "2020-07-01 00:00:00" + }, + "LINECARD_PORT_STAT_MARK_TABLE|sonic-lc3": { + "timestamp": "2020-07-01 00:00:00" + }, + "LINECARD_PORT_STAT_TABLE|Ethernet1/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + 
"rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet2/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet11/1": { + "state": "N/A", + "rx_ok": "N/A", + "rx_bps": "N/A", + "rx_pps": "N/A", + "rx_util": "N/A", + "rx_err": "N/A", + "rx_drop": "N/A", + "rx_ovr": "N/A", + "tx_ok": "N/A", + "tx_bps": "N/A", + "tx_pps": "N/A", + "tx_util": "N/A", + "tx_err": "N/A", + "tx_drop": "N/A", + "tx_ovr": "N/A" + } +} \ No newline at end of file diff --git a/tests/portstat_db/on_sup_no_counters/chassis_state_db.json b/tests/portstat_db/on_sup_no_counters/chassis_state_db.json new file mode 100644 index 0000000000..5c380954c3 --- /dev/null +++ b/tests/portstat_db/on_sup_no_counters/chassis_state_db.json @@ -0,0 +1,11 @@ +{ + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD0": { + "module_hostname": "sonic-lc1" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { + "module_hostname": "sonic-lc2" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { + "module_hostname": "sonic-lc3" + } +} \ No newline at end of file diff --git a/tests/portstat_db/on_sup_partial_lc/chassis_state_db.json b/tests/portstat_db/on_sup_partial_lc/chassis_state_db.json new file mode 100644 index 0000000000..6040a80776 --- /dev/null +++ b/tests/portstat_db/on_sup_partial_lc/chassis_state_db.json @@ -0,0 +1,48 @@ +{ + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD0": { + "module_hostname": "sonic-lc1" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { + "module_hostname": "sonic-lc2" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { + "module_hostname": "sonic-lc3" + }, + "LINECARD_PORT_STAT_MARK_TABLE|sonic-lc1": { + "timestamp": "2020-07-01 00:00:00" + }, + 
"LINECARD_PORT_STAT_TABLE|Ethernet1/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet2/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + } +} \ No newline at end of file diff --git a/tests/portstat_test.py b/tests/portstat_test.py index 885c06662f..9c6f94d96a 100644 --- a/tests/portstat_test.py +++ b/tests/portstat_test.py @@ -8,8 +8,8 @@ from .utils import get_result_and_return_code from utilities_common.cli import UserCache -root_path = os.path.dirname(os.path.abspath(__file__)) -modules_path = os.path.dirname(root_path) +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) scripts_path = os.path.join(modules_path, "scripts") intf_counters_before_clear = """\ @@ -42,6 +42,27 @@ Ethernet8 N/A 100,317 0 0 """ +intf_fec_counters_fec_hist = """\ +Symbol Errors Per Codeword Codewords +---------------------------- ----------- +BIN0 1000000 +BIN1 900000 +BIN2 800000 +BIN3 700000 +BIN4 600000 +BIN5 500000 +BIN6 400000 +BIN7 300000 +BIN8 0 +BIN9 0 +BIN10 0 +BIN11 0 +BIN12 0 +BIN13 0 +BIN14 0 +BIN15 0 +""" + intf_fec_counters_period = """\ The rates are calculated within 3 seconds period IFACE STATE FEC_CORR FEC_UNCORR FEC_SYMBOL_ERR @@ -234,6 +255,36 @@ Time Since Counters Last Cleared............... 
None """ +intf_counters_on_sup = """\ + IFACE STATE RX_OK RX_BPS RX_UTIL RX_ERR RX_DRP RX_OVR TX_OK TX_BPS TX_UTIL\ + TX_ERR TX_DRP TX_OVR +------------ ------- ------- --------- --------- -------- -------- -------- ------- --------- ---------\ + -------- -------- -------- + Ethernet1/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 + Ethernet2/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 +Ethernet11/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 +""" + +intf_counters_on_sup_no_counters = "Linecard Counter Table is not available.\n" + +intf_counters_on_sup_partial_lc = "Not all linecards have published their counter values.\n" + +intf_counters_on_sup_na = """\ + IFACE STATE RX_OK RX_BPS RX_UTIL RX_ERR RX_DRP RX_OVR TX_OK TX_BPS TX_UTIL\ + TX_ERR TX_DRP TX_OVR +------------ ------- ------- --------- --------- -------- -------- -------- ------- --------- ---------\ + -------- -------- -------- + Ethernet1/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 + Ethernet2/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 +Ethernet11/1 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A N/A +""" + TEST_PERIOD = 3 @@ -320,6 +371,15 @@ def test_show_intf_fec_counters(self): assert return_code == 0 assert result == intf_fec_counters + def test_show_intf_counters_fec_histogram(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"].commands["fec-histogram"], ["Ethernet0"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == intf_fec_counters_fec_hist + def test_show_intf_fec_counters_period(self): runner = CliRunner() result = runner.invoke(show.cli.commands["interfaces"].commands["counters"].commands["fec-stats"], @@ -397,13 +457,109 @@ def test_clear_intf_counters(self): assert return_code == 0 verify_after_clear(result, intf_counter_after_clear) + def test_show_intf_counters_on_sup(self): + remove_tmp_cnstat_file() 
+ os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "1" + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == intf_counters_on_sup + + return_code, result = get_result_and_return_code(['portstat']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == intf_counters_on_sup + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" + + def test_show_intf_counters_on_sup_no_counters(self): + remove_tmp_cnstat_file() + os.system("cp {} /tmp/".format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.system("cp {} {}".format(os.path.join(test_path, "portstat_db/on_sup_no_counters/chassis_state_db.json"), + os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "1" + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == intf_counters_on_sup_no_counters + + return_code, result = get_result_and_return_code(['portstat']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == intf_counters_on_sup_no_counters + + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" + os.system("cp /tmp/chassis_state_db.json {}" + .format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + + def test_show_intf_counters_on_sup_partial_lc(self): + remove_tmp_cnstat_file() + os.system("cp {} /tmp/".format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.system("cp {} {}".format(os.path.join(test_path, "portstat_db/on_sup_partial_lc/chassis_state_db.json"), + os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + 
os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "1" + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == intf_counters_on_sup_partial_lc + + return_code, result = get_result_and_return_code(['portstat']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == intf_counters_on_sup_partial_lc + + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" + os.system("cp /tmp/chassis_state_db.json {}" + .format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + + def test_show_intf_counters_on_sup_na(self): + remove_tmp_cnstat_file() + os.system("cp {} /tmp/".format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.system("cp {} {}".format(os.path.join(test_path, "portstat_db/on_sup_na/chassis_state_db.json"), + os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "1" + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == intf_counters_on_sup_na + + return_code, result = get_result_and_return_code(['portstat']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == intf_counters_on_sup_na + + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" + os.system("cp /tmp/chassis_state_db.json {}" + .format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + @classmethod def teardown_class(cls): print("TEARDOWN") os.environ["PATH"] = os.pathsep.join( os.environ["PATH"].split(os.pathsep)[:-1]) os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" remove_tmp_cnstat_file() + 
os.system("cp /tmp/chassis_state_db.json {}" + .format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) class TestMultiAsicPortStat(object): diff --git a/tests/qos_config_input/0/config_qos.json b/tests/qos_config_input/0/config_qos.json index 40c1903a06..5ef4b07f8d 100644 --- a/tests/qos_config_input/0/config_qos.json +++ b/tests/qos_config_input/0/config_qos.json @@ -1,52 +1,466 @@ { - "TC_TO_PRIORITY_GROUP_MAP": { - "AZURE": { - "0": "0", - "1": "0", - "2": "0", - "3": "3", - "4": "4", - "5": "0", - "6": "0", - "7": "7" + "BUFFER_PG": { + "Ethernet0|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet100|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet104|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet108|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet112|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet116|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet120|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet124|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet12|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet16|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet20|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet24|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet28|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet32|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet36|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet40|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet44|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet48|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet4|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet52|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet56|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet60|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet64|0": { + "profile": "ingress_lossy_profile" + }, + 
"Ethernet68|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet72|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet76|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet80|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet84|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet88|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet8|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet92|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet96|0": { + "profile": "ingress_lossy_profile" } }, - "MAP_PFC_PRIORITY_TO_QUEUE": { - "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "BUFFER_POOL": { + "egress_lossless_pool": { + "mode": "static", + "size": "12766208", + "type": "egress" + }, + "egress_lossy_pool": { + "mode": "dynamic", + "size": "7326924", + "type": "egress" + }, + "ingress_lossless_pool": { + "mode": "dynamic", + "size": "12766208", + "type": "ingress" } }, - "TC_TO_QUEUE_MAP": { + "BUFFER_PROFILE": { + "egress_lossless_profile": { + "pool": "egress_lossless_pool", + "size": "0", + "static_th": "12766208" + }, + "egress_lossy_profile": { + "dynamic_th": "3", + "pool": "egress_lossy_pool", + "size": "1518" + }, + "ingress_lossy_profile": { + "dynamic_th": "3", + "pool": "ingress_lossless_pool", + "size": "0" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet100|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet104|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|0-2": { + "profile": 
"egress_lossy_profile" + }, + "Ethernet108|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet108|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet112|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet116|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet120|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet124|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet12|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet16|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet20|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet24|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet28|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|3-4": { + "profile": 
"egress_lossless_profile" + }, + "Ethernet32|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet36|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet40|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet44|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet48|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet4|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet52|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet56|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet60|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet64|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet68|5-6": { + "profile": 
"egress_lossy_profile" + }, + "Ethernet72|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet72|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet76|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet80|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet84|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet88|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet8|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet92|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet96|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "Ethernet0": "300m", + "Ethernet100": "300m", + "Ethernet104": "300m", + "Ethernet108": "300m", + "Ethernet112": "300m", + "Ethernet116": "300m", + "Ethernet12": "300m", + "Ethernet120": "300m", + "Ethernet124": "300m", + "Ethernet16": "300m", + "Ethernet20": "300m", + "Ethernet24": "300m", + 
"Ethernet28": "300m", + "Ethernet32": "300m", + "Ethernet36": "300m", + "Ethernet4": "300m", + "Ethernet40": "300m", + "Ethernet44": "300m", + "Ethernet48": "300m", + "Ethernet52": "300m", + "Ethernet56": "300m", + "Ethernet60": "300m", + "Ethernet64": "300m", + "Ethernet68": "300m", + "Ethernet72": "300m", + "Ethernet76": "300m", + "Ethernet8": "300m", + "Ethernet80": "300m", + "Ethernet84": "300m", + "Ethernet88": "300m", + "Ethernet92": "300m", + "Ethernet96": "300m" } }, "DSCP_TO_TC_MAP": { "AZURE": { - "0" : "1", - "1" : "1", - "2" : "1", - "3" : "3", - "4" : "4", - "5" : "2", - "6" : "1", - "7" : "1", - "8" : "0", - "9" : "1", + "0": "1", + "1": "1", "10": "1", "11": "1", "12": "1", @@ -57,6 +471,7 @@ "17": "1", "18": "1", "19": "1", + "2": "1", "20": "1", "21": "1", "22": "1", @@ -67,6 +482,7 @@ "27": "1", "28": "1", "29": "1", + "3": "3", "30": "1", "31": "1", "32": "1", @@ -77,6 +493,7 @@ "37": "1", "38": "1", "39": "1", + "4": "4", "40": "1", "41": "1", "42": "1", @@ -87,6 +504,7 @@ "47": "1", "48": "6", "49": "1", + "5": "2", "50": "1", "51": "1", "52": "1", @@ -97,41 +515,79 @@ "57": "1", "58": "1", "59": "1", + "6": "1", "60": "1", "61": "1", "62": "1", - "63": "1" + "63": "1", + "7": "1", + "8": "0", + "9": "1" + } + }, + "MAP_PFC_PRIORITY_TO_QUEUE": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" } }, + "PORT_QOS_MAP": {}, + "QUEUE": {}, "SCHEDULER": { "scheduler.0": { - "type" : "DWRR", + "type": "DWRR", "weight": "14" }, "scheduler.1": { - "type" : "DWRR", + "type": "DWRR", "weight": "15" } }, - "PORT_QOS_MAP": { + "TC_TO_PRIORITY_GROUP_MAP": { + "AZURE": { + "0": "0", + "1": "0", + "2": "0", + "3": "3", + "4": "4", + "5": "0", + "6": "0", + "7": "7" + } + }, + "TC_TO_QUEUE_MAP": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" + } }, "WRED_PROFILE": { - "AZURE_LOSSLESS" : { - "wred_green_enable" : "true", - 
"wred_yellow_enable" : "true", - "wred_red_enable" : "true", - "ecn" : "ecn_all", - "green_max_threshold" : "2097152", - "green_min_threshold" : "1048576", - "yellow_max_threshold" : "2097152", - "yellow_min_threshold" : "1048576", - "red_max_threshold" : "2097152", - "red_min_threshold" : "1048576", - "green_drop_probability" : "5", + "AZURE_LOSSLESS": { + "ecn": "ecn_all", + "green_drop_probability": "5", + "green_max_threshold": "2097152", + "green_min_threshold": "1048576", + "red_drop_probability": "5", + "red_max_threshold": "2097152", + "red_min_threshold": "1048576", + "wred_green_enable": "true", + "wred_red_enable": "true", + "wred_yellow_enable": "true", "yellow_drop_probability": "5", - "red_drop_probability" : "5" + "yellow_max_threshold": "2097152", + "yellow_min_threshold": "1048576" } - }, - "QUEUE": { } -} +} \ No newline at end of file diff --git a/tests/qos_config_input/1/config_qos.json b/tests/qos_config_input/1/config_qos.json index 40c1903a06..5ef4b07f8d 100644 --- a/tests/qos_config_input/1/config_qos.json +++ b/tests/qos_config_input/1/config_qos.json @@ -1,52 +1,466 @@ { - "TC_TO_PRIORITY_GROUP_MAP": { - "AZURE": { - "0": "0", - "1": "0", - "2": "0", - "3": "3", - "4": "4", - "5": "0", - "6": "0", - "7": "7" + "BUFFER_PG": { + "Ethernet0|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet100|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet104|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet108|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet112|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet116|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet120|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet124|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet12|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet16|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet20|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet24|0": { + "profile": 
"ingress_lossy_profile" + }, + "Ethernet28|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet32|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet36|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet40|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet44|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet48|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet4|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet52|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet56|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet60|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet64|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet68|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet72|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet76|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet80|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet84|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet88|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet8|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet92|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet96|0": { + "profile": "ingress_lossy_profile" } }, - "MAP_PFC_PRIORITY_TO_QUEUE": { - "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "BUFFER_POOL": { + "egress_lossless_pool": { + "mode": "static", + "size": "12766208", + "type": "egress" + }, + "egress_lossy_pool": { + "mode": "dynamic", + "size": "7326924", + "type": "egress" + }, + "ingress_lossless_pool": { + "mode": "dynamic", + "size": "12766208", + "type": "ingress" } }, - "TC_TO_QUEUE_MAP": { + "BUFFER_PROFILE": { + "egress_lossless_profile": { + "pool": "egress_lossless_pool", + "size": "0", + "static_th": "12766208" + }, + "egress_lossy_profile": { + "dynamic_th": "3", + "pool": "egress_lossy_pool", + "size": "1518" + }, + "ingress_lossy_profile": { + 
"dynamic_th": "3", + "pool": "ingress_lossless_pool", + "size": "0" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet100|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet104|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet108|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet112|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet116|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet120|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet124|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet12|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet16|5-6": { + 
"profile": "egress_lossy_profile" + }, + "Ethernet20|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet20|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet24|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet28|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet32|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet36|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet40|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet44|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet48|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet4|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet52|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|0-2": { + "profile": 
"egress_lossy_profile" + }, + "Ethernet56|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet56|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet60|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet64|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet68|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet72|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet76|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet80|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet84|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet88|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet8|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|3-4": { + "profile": 
"egress_lossless_profile" + }, + "Ethernet92|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet96|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "Ethernet0": "300m", + "Ethernet100": "300m", + "Ethernet104": "300m", + "Ethernet108": "300m", + "Ethernet112": "300m", + "Ethernet116": "300m", + "Ethernet12": "300m", + "Ethernet120": "300m", + "Ethernet124": "300m", + "Ethernet16": "300m", + "Ethernet20": "300m", + "Ethernet24": "300m", + "Ethernet28": "300m", + "Ethernet32": "300m", + "Ethernet36": "300m", + "Ethernet4": "300m", + "Ethernet40": "300m", + "Ethernet44": "300m", + "Ethernet48": "300m", + "Ethernet52": "300m", + "Ethernet56": "300m", + "Ethernet60": "300m", + "Ethernet64": "300m", + "Ethernet68": "300m", + "Ethernet72": "300m", + "Ethernet76": "300m", + "Ethernet8": "300m", + "Ethernet80": "300m", + "Ethernet84": "300m", + "Ethernet88": "300m", + "Ethernet92": "300m", + "Ethernet96": "300m" } }, "DSCP_TO_TC_MAP": { "AZURE": { - "0" : "1", - "1" : "1", - "2" : "1", - "3" : "3", - "4" : "4", - "5" : "2", - "6" : "1", - "7" : "1", - "8" : "0", - "9" : "1", + "0": "1", + "1": "1", "10": "1", "11": "1", "12": "1", @@ -57,6 +471,7 @@ "17": "1", "18": "1", "19": "1", + "2": "1", "20": "1", "21": "1", "22": "1", @@ -67,6 +482,7 @@ "27": "1", "28": "1", "29": "1", + "3": "3", "30": "1", "31": "1", "32": "1", @@ -77,6 +493,7 @@ "37": "1", "38": "1", "39": "1", + "4": "4", "40": "1", "41": "1", "42": "1", @@ -87,6 +504,7 @@ "47": "1", "48": "6", "49": "1", + "5": "2", "50": "1", "51": "1", "52": "1", @@ -97,41 +515,79 @@ "57": "1", "58": "1", "59": "1", + "6": "1", "60": "1", "61": "1", "62": "1", - "63": "1" + "63": "1", + "7": "1", + "8": "0", + "9": "1" + } + }, + "MAP_PFC_PRIORITY_TO_QUEUE": { 
+ "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" } }, + "PORT_QOS_MAP": {}, + "QUEUE": {}, "SCHEDULER": { "scheduler.0": { - "type" : "DWRR", + "type": "DWRR", "weight": "14" }, "scheduler.1": { - "type" : "DWRR", + "type": "DWRR", "weight": "15" } }, - "PORT_QOS_MAP": { + "TC_TO_PRIORITY_GROUP_MAP": { + "AZURE": { + "0": "0", + "1": "0", + "2": "0", + "3": "3", + "4": "4", + "5": "0", + "6": "0", + "7": "7" + } + }, + "TC_TO_QUEUE_MAP": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" + } }, "WRED_PROFILE": { - "AZURE_LOSSLESS" : { - "wred_green_enable" : "true", - "wred_yellow_enable" : "true", - "wred_red_enable" : "true", - "ecn" : "ecn_all", - "green_max_threshold" : "2097152", - "green_min_threshold" : "1048576", - "yellow_max_threshold" : "2097152", - "yellow_min_threshold" : "1048576", - "red_max_threshold" : "2097152", - "red_min_threshold" : "1048576", - "green_drop_probability" : "5", + "AZURE_LOSSLESS": { + "ecn": "ecn_all", + "green_drop_probability": "5", + "green_max_threshold": "2097152", + "green_min_threshold": "1048576", + "red_drop_probability": "5", + "red_max_threshold": "2097152", + "red_min_threshold": "1048576", + "wred_green_enable": "true", + "wred_red_enable": "true", + "wred_yellow_enable": "true", "yellow_drop_probability": "5", - "red_drop_probability" : "5" + "yellow_max_threshold": "2097152", + "yellow_min_threshold": "1048576" } - }, - "QUEUE": { } -} +} \ No newline at end of file diff --git a/tests/qos_config_input/config_qos.json b/tests/qos_config_input/config_qos.json index fd76373983..0d44b421bd 100644 --- a/tests/qos_config_input/config_qos.json +++ b/tests/qos_config_input/config_qos.json @@ -1,52 +1,466 @@ { - "TC_TO_PRIORITY_GROUP_MAP": { - "AZURE": { - "0": "0", - "1": "0", - "2": "0", - "3": "3", - "4": "4", - "5": "0", - "6": "0", - "7": "7" + "BUFFER_PG": { + "Ethernet0|0": { + "profile": 
"ingress_lossy_profile" + }, + "Ethernet100|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet104|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet108|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet112|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet116|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet120|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet124|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet12|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet16|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet20|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet24|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet28|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet32|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet36|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet40|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet44|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet48|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet4|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet52|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet56|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet60|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet64|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet68|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet72|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet76|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet80|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet84|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet88|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet8|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet92|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet96|0": { + "profile": "ingress_lossy_profile" } }, - "MAP_PFC_PRIORITY_TO_QUEUE": { - "AZURE": 
{ - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "BUFFER_POOL": { + "egress_lossless_pool": { + "mode": "static", + "size": "12766208", + "type": "egress" + }, + "egress_lossy_pool": { + "mode": "dynamic", + "size": "7326924", + "type": "egress" + }, + "ingress_lossless_pool": { + "mode": "dynamic", + "size": "12766208", + "type": "ingress" } }, - "TC_TO_QUEUE_MAP": { + "BUFFER_PROFILE": { + "egress_lossless_profile": { + "pool": "egress_lossless_pool", + "size": "0", + "static_th": "12766208" + }, + "egress_lossy_profile": { + "dynamic_th": "3", + "pool": "egress_lossy_pool", + "size": "1518" + }, + "ingress_lossy_profile": { + "dynamic_th": "3", + "pool": "ingress_lossless_pool", + "size": "0" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet100|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet104|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet108|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet112|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet116|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|0-2": { + "profile": 
"egress_lossy_profile" + }, + "Ethernet120|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet120|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet124|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet12|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet16|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet20|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet24|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet28|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet32|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet36|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet40|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|3-4": { + "profile": 
"egress_lossless_profile" + }, + "Ethernet44|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet48|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet4|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet52|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet56|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet60|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet64|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet68|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet72|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet76|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet80|5-6": { + "profile": 
"egress_lossy_profile" + }, + "Ethernet84|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet84|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet88|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet8|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet92|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet96|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "Ethernet0": "300m", + "Ethernet100": "300m", + "Ethernet104": "300m", + "Ethernet108": "300m", + "Ethernet112": "300m", + "Ethernet116": "300m", + "Ethernet12": "300m", + "Ethernet120": "300m", + "Ethernet124": "300m", + "Ethernet16": "300m", + "Ethernet20": "300m", + "Ethernet24": "300m", + "Ethernet28": "300m", + "Ethernet32": "300m", + "Ethernet36": "300m", + "Ethernet4": "300m", + "Ethernet40": "300m", + "Ethernet44": "300m", + "Ethernet48": "300m", + "Ethernet52": "300m", + "Ethernet56": "300m", + "Ethernet60": "300m", + "Ethernet64": "300m", + "Ethernet68": "300m", + "Ethernet72": "300m", + "Ethernet76": "300m", + "Ethernet8": "300m", + "Ethernet80": "300m", + "Ethernet84": "300m", + "Ethernet88": "300m", + "Ethernet92": "300m", + "Ethernet96": "300m" } }, "DSCP_TO_TC_MAP": { "AZURE": { - "0" : "1", - "1" : "1", - "2" : "1", - "3" : "3", - "4" : "4", - "5" : "2", 
- "6" : "1", - "7" : "1", - "8" : "0", - "9" : "1", + "0": "1", + "1": "1", "10": "1", "11": "1", "12": "1", @@ -57,6 +471,7 @@ "17": "1", "18": "1", "19": "1", + "2": "1", "20": "1", "21": "1", "22": "1", @@ -67,6 +482,7 @@ "27": "1", "28": "1", "29": "1", + "3": "3", "30": "1", "31": "1", "32": "1", @@ -77,6 +493,7 @@ "37": "1", "38": "1", "39": "1", + "4": "4", "40": "1", "41": "1", "42": "1", @@ -87,6 +504,7 @@ "47": "1", "48": "6", "49": "1", + "5": "2", "50": "1", "51": "1", "52": "1", @@ -97,53 +515,91 @@ "57": "1", "58": "1", "59": "1", + "6": "1", "60": "1", "61": "1", "62": "1", - "63": "1" + "63": "1", + "7": "1", + "8": "0", + "9": "1" + } + }, + "MAP_PFC_PRIORITY_TO_QUEUE": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" } }, "MPLS_TC_TO_TC_MAP": { "AZURE": { - "0" : "1", - "1" : "1", - "2" : "1", - "3" : "3", - "4" : "4", - "5" : "2", - "6" : "1", - "7" : "1" + "0": "1", + "1": "1", + "2": "1", + "3": "3", + "4": "4", + "5": "2", + "6": "1", + "7": "1" } }, + "PORT_QOS_MAP": {}, + "QUEUE": {}, "SCHEDULER": { "scheduler.0": { - "type" : "DWRR", + "type": "DWRR", "weight": "14" }, "scheduler.1": { - "type" : "DWRR", + "type": "DWRR", "weight": "15" } }, - "PORT_QOS_MAP": { + "TC_TO_PRIORITY_GROUP_MAP": { + "AZURE": { + "0": "0", + "1": "0", + "2": "0", + "3": "3", + "4": "4", + "5": "0", + "6": "0", + "7": "7" + } + }, + "TC_TO_QUEUE_MAP": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" + } }, "WRED_PROFILE": { - "AZURE_LOSSLESS" : { - "wred_green_enable" : "true", - "wred_yellow_enable" : "true", - "wred_red_enable" : "true", - "ecn" : "ecn_all", - "green_max_threshold" : "2097152", - "green_min_threshold" : "1048576", - "yellow_max_threshold" : "2097152", - "yellow_min_threshold" : "1048576", - "red_max_threshold" : "2097152", - "red_min_threshold" : "1048576", - "green_drop_probability" : "5", + "AZURE_LOSSLESS": { + "ecn": 
"ecn_all", + "green_drop_probability": "5", + "green_max_threshold": "2097152", + "green_min_threshold": "1048576", + "red_drop_probability": "5", + "red_max_threshold": "2097152", + "red_min_threshold": "1048576", + "wred_green_enable": "true", + "wred_red_enable": "true", + "wred_yellow_enable": "true", "yellow_drop_probability": "5", - "red_drop_probability" : "5" + "yellow_max_threshold": "2097152", + "yellow_min_threshold": "1048576" } - }, - "QUEUE": { } -} +} \ No newline at end of file diff --git a/tests/queue_counter_test.py b/tests/queue_counter_test.py index 391d004872..508550b9c8 100644 --- a/tests/queue_counter_test.py +++ b/tests/queue_counter_test.py @@ -22,6 +22,7 @@ show_queue_counters = """\ +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet0 UC0 0 0 0 0 @@ -55,6 +56,7 @@ Ethernet0 ALL28 N/A N/A N/A N/A Ethernet0 ALL29 N/A N/A N/A N/A +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet4 UC0 41 96 70 98 @@ -88,6 +90,7 @@ Ethernet4 ALL28 N/A N/A N/A N/A Ethernet4 ALL29 N/A N/A N/A N/A +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet8 UC0 0 0 0 0 @@ -123,6 +126,7 @@ """ show_queue_counters_nz = """\ +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet0 UC1 60 43 39 1 @@ -155,6 +159,7 @@ Ethernet0 ALL28 N/A N/A N/A N/A Ethernet0 ALL29 N/A N/A N/A N/A +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet4 UC0 41 96 70 98 @@ -188,6 +193,7 @@ Ethernet4 ALL28 N/A N/A N/A N/A Ethernet4 ALL29 N/A N/A N/A N/A +For namespace : Port TxQ Counter/pkts 
Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet8 UC1 38 17 68 91 @@ -324,6 +330,7 @@ """] show_queue_counters_port = """\ +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet8 UC0 0 0 0 0 @@ -359,6 +366,7 @@ """ show_queue_counters_port_nz = """\ +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet8 UC1 38 17 68 91 @@ -1851,6 +1859,7 @@ show_queue_voq_counters = """\ +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet0 VOQ0 0 0 0 0 0 @@ -1862,6 +1871,7 @@ testsw|Ethernet0 VOQ6 33 17 94 74 17 testsw|Ethernet0 VOQ7 40 71 95 33 73 +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet4 VOQ0 54 8 93 78 29 @@ -1873,6 +1883,7 @@ testsw|Ethernet4 VOQ6 68 60 66 81 22 testsw|Ethernet4 VOQ7 63 4 48 76 53 +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet8 VOQ0 41 73 77 74 67 @@ -1887,6 +1898,7 @@ """ show_queue_voq_counters_nz = """\ +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet0 VOQ1 60 43 39 1 1 @@ -1897,6 +1909,7 @@ testsw|Ethernet0 VOQ6 33 17 94 74 17 testsw|Ethernet0 VOQ7 40 71 95 33 73 +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts 
---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet4 VOQ0 54 8 93 78 29 @@ -1908,6 +1921,7 @@ testsw|Ethernet4 VOQ6 68 60 66 81 22 testsw|Ethernet4 VOQ7 63 4 48 76 53 +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet8 VOQ0 41 73 77 74 67 @@ -1958,6 +1972,7 @@ ] show_queue_port_voq_counters = """\ +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet0 VOQ0 0 0 0 0 0 @@ -1972,6 +1987,7 @@ """ show_queue_port_voq_counters_nz = """\ +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet0 VOQ1 60 43 39 1 1 diff --git a/tests/remote_cli_test.py b/tests/remote_cli_test.py index 9883dfa16b..57a220be1e 100644 --- a/tests/remote_cli_test.py +++ b/tests/remote_cli_test.py @@ -11,6 +11,7 @@ import select import socket import termios +import getpass MULTI_LC_REXEC_OUTPUT = '''======== LINE-CARD0|sonic-lc1 output: ======== hello world @@ -75,17 +76,27 @@ def mock_paramiko_connection(channel): return conn +def mock_getpass(prompt="Password:", stream=None): + return "dummy" + + class TestRemoteExec(object): + __getpass = getpass.getpass + @classmethod def setup_class(cls): print("SETUP") from .mock_tables import dbconnector dbconnector.load_database_config() + getpass.getpass = mock_getpass + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + getpass.getpass = TestRemoteExec.__getpass @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - 
@mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) - # @mock.patch.object(linecard.Linecard, '_get_password', mock.MagicMock(return_value='dummmy')) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value=mock_exec_command())) def test_rexec_with_module_name(self): @@ -98,7 +109,6 @@ def test_rexec_with_module_name(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value=mock_exec_command())) def test_rexec_with_hostname(self): @@ -111,7 +121,6 @@ def test_rexec_with_hostname(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value=mock_exec_error_cmd())) def test_rexec_error_with_module_name(self): @@ -133,7 +142,6 @@ def test_rexec_error(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_all(self): @@ -147,7 +155,6 @@ def test_rexec_all(self): @mock.patch("sonic_py_common.device_info.is_chassis", 
mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_invalid_lc(self): @@ -161,7 +168,6 @@ def test_rexec_invalid_lc(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_unreachable_lc(self): @@ -175,7 +181,6 @@ def test_rexec_unreachable_lc(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_help(self): @@ -188,7 +193,6 @@ def test_rexec_help(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock(side_effect=paramiko.ssh_exception.NoValidConnectionsError({('192.168.0.1', 22): "None"}))) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) @@ -202,7 +206,6 @@ def test_rexec_exception(self): assert "Failed to connect to sonic-lc1 with username admin\n" == 
result.output @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock(side_effect=paramiko.ssh_exception.NoValidConnectionsError({('192.168.0.1', 22): "None"}))) def test_rexec_with_user_param(self): @@ -214,6 +217,19 @@ def test_rexec_with_user_param(self): assert result.exit_code == 1, result.output assert "Failed to connect to sonic-lc1 with username testuser\n" == result.output + @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + def test_rexec_without_password_input(self): + runner = CliRunner() + getpass.getpass = TestRemoteExec.__getpass + LINECARD_NAME = "all" + result = runner.invoke( + rexec.cli, [LINECARD_NAME, "-c", "show version"]) + getpass.getpass = mock_getpass + print(result.output) + assert result.exit_code == 1, result.output + assert "Aborted" in result.output + class TestRemoteCLI(object): @classmethod diff --git a/tests/route_check_test.py b/tests/route_check_test.py index 26c632d742..1f92b3d19a 100644 --- a/tests/route_check_test.py +++ b/tests/route_check_test.py @@ -252,11 +252,8 @@ def run_test(self, ct_data): def mock_check_output(self, ct_data, *args, **kwargs): ns = self.extract_namespace_from_args(args[0]) - if 'show runningconfiguration bgp' in ' '.join(args[0]): - return 'bgp suppress-fib-pending' - else: - routes = ct_data.get(FRR_ROUTES, {}).get(ns, {}) - return json.dumps(routes) + routes = ct_data.get(FRR_ROUTES, {}).get(ns, {}) + return json.dumps(routes) def assert_results(self, ct_data, ret, res): expect_ret = ct_data.get(RET, 0) diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 0e58daa18e..d8d13df1c0 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -1631,11 +1631,16 @@ def test_load_port_config(self, 
mock_is_multi_asic): @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.ConfigDBConnector') + @patch('sfputil.main.SonicV2Connector') @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) - def test_debug_loopback(self, mock_chassis): + @patch('sonic_py_common.multi_asic.get_front_end_namespaces', MagicMock(return_value=[''])) + def test_debug_loopback(self, mock_sonic_v2_connector, mock_config_db_connector, mock_chassis): mock_sfp = MagicMock() mock_api = MagicMock() + mock_config_db_connector.return_value = MagicMock() + mock_sonic_v2_connector.return_value = MagicMock() mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) mock_sfp.get_presence.return_value = True mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) @@ -1643,31 +1648,75 @@ def test_debug_loopback(self, mock_chassis): runner = CliRunner() mock_sfp.get_presence.return_value = False result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "host-side-input"]) + ["Ethernet0", "host-side-input", "enable"]) assert result.output == 'Ethernet0: SFP EEPROM not detected\n' mock_sfp.get_presence.return_value = True mock_sfp.get_xcvr_api = MagicMock(side_effect=NotImplementedError) result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "host-side-input"]) + ["Ethernet0", "host-side-input", "enable"]) assert result.output == 'Ethernet0: This functionality is not implemented\n' assert result.exit_code == ERROR_NOT_IMPLEMENTED mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "host-side-input"]) - assert result.output == 'Ethernet0: Set host-side-input loopback\n' + ["Ethernet0", "host-side-input", "enable"]) + assert result.output == 
'Ethernet0: enable host-side-input loopback\n' + assert result.exit_code != ERROR_NOT_IMPLEMENTED + + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: enable media-side-input loopback\n' assert result.exit_code != ERROR_NOT_IMPLEMENTED mock_api.set_loopback_mode.return_value = False result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "none"]) - assert result.output == 'Ethernet0: Set none loopback failed\n' + ["Ethernet0", "media-side-output", "enable"]) + assert result.output == 'Ethernet0: enable media-side-output loopback failed\n' assert result.exit_code == EXIT_FAIL mock_api.set_loopback_mode.return_value = True mock_api.set_loopback_mode.side_effect = AttributeError result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "none"]) + ["Ethernet0", "host-side-input", "enable"]) assert result.output == 'Ethernet0: Set loopback mode is not applicable for this module\n' assert result.exit_code == ERROR_NOT_IMPLEMENTED + + mock_api.set_loopback_mode.side_effect = [TypeError, True] + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input", "enable"]) + assert result.output == 'Ethernet0: Set loopback mode failed. 
Parameter is not supported\n' + assert result.exit_code == EXIT_FAIL + + mock_config_db = MagicMock() + mock_config_db.get.side_effect = TypeError + mock_config_db_connector.return_value = mock_config_db + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: subport is not present in CONFIG_DB\n' + assert result.exit_code == EXIT_FAIL + + mock_config_db_connector.return_value = None + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: Failed to connect to CONFIG_DB\n' + assert result.exit_code == EXIT_FAIL + + mock_config_db_connector.return_value = MagicMock() + mock_sonic_v2_connector.return_value = None + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: Failed to connect to STATE_DB\n' + assert result.exit_code == EXIT_FAIL + + @pytest.mark.parametrize("subport, lane_count, expected_mask", [ + (1, 1, 0x1), + (1, 4, 0xf), + (2, 1, 0x2), + (2, 4, 0xf0), + (3, 2, 0x30), + (4, 1, 0x8), + ]) + def test_get_subport_lane_mask(self, subport, lane_count, expected_mask): + assert sfputil.get_subport_lane_mask(subport, lane_count) == expected_mask diff --git a/tests/show_bmp_test.py b/tests/show_bmp_test.py new file mode 100644 index 0000000000..c0bc556d10 --- /dev/null +++ b/tests/show_bmp_test.py @@ -0,0 +1,178 @@ +import os +from click.testing import CliRunner +from utilities_common.db import Db + +import show.main as show + +test_path = os.path.dirname(os.path.abspath(__file__)) +mock_db_path = os.path.join(test_path, "bmp_input") + + +class TestShowBmp(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + def set_db_values(self, db, key, kvs): + for field, value in kvs.items(): + 
db.set(db.BMP_STATE_DB, key, field, value) + + def test_show_bmp_neighbor_table(self): + runner = CliRunner() + db = Db() + dbconnector = db.db + self.set_db_values(dbconnector, + "BGP_NEIGHBOR_TABLE|10.0.1.1", + {"peer_addr": "10.0.0.61", + "peer_asn": "64915", + "peer_rd": "300", + "peer_port": "5000", + "local_addr": "10.1.0.32", + "local_asn": "65100", + "local_port": "6000", + "sent_cap": "supports-mpbgp,supports-graceful-restart", + "recv_cap": "supports-mpbgp,supports-graceful-restart"}) + self.set_db_values(dbconnector, + "BGP_NEIGHBOR_TABLE|10.0.1.2", + {"peer_addr": "10.0.0.62", + "peer_asn": "64915", + "peer_rd": "300", + "peer_port": "5000", + "local_addr": "10.1.0.32", + "local_asn": "65100", + "local_port": "6000", + "sent_cap": "supports-mpbgp,supports-graceful-restart", + "recv_cap": "supports-mpbgp,supports-graceful-restart"}) + + expected_output = """\ +Total number of bmp neighbors: 2 +Neighbor_Address Peer_Address Peer_ASN Peer_RD Peer_Port Local_Address Local_ASN \ +Local_Port Advertised_Capabilities Received_Capabilities +------------------ -------------- ---------- --------- ----------- --------------- ----------- \ +------------ ---------------------------------------- ---------------------------------------- +10.0.0.61 10.0.0.61 64915 300 5000 10.1.0.32 65100 6000 \ +supports-mpbgp,supports-graceful-restart supports-mpbgp,supports-graceful-restart +10.0.0.62 10.0.0.62 64915 300 5000 10.1.0.32 65100 6000 \ +supports-mpbgp,supports-graceful-restart supports-mpbgp,supports-graceful-restart +""" + result = runner.invoke(show.cli.commands['bmp'].commands['bgp-neighbor-table'], [], obj=db) + assert result.exit_code == 0 + resultA = result.output.strip().replace(' ', '').replace('\n', '') + resultB = expected_output.strip().replace(' ', '').replace('\n', '') + assert resultA == resultB + + def test_show_bmp_rib_out_table(self): + runner = CliRunner() + db = Db() + dbconnector = db.db + self.set_db_values(dbconnector, + 
"BGP_RIB_OUT_TABLE|20c0:ef50::/64|10.0.0.57", + {"origin": "igp", + "as_path": "65100 64600", + "origin_as": "64915", + "next_hop": "fc00::7e", + "local_pref": "0", + "originator_id": "0", + "community_list": "residential", + "ext_community_list": "traffic_engineering"}) + self.set_db_values(dbconnector, + "BGP_RIB_OUT_TABLE|192.181.168.0/25|10.0.0.59", + {"origin": "igp", + "as_path": "65100 64600", + "origin_as": "64915", + "next_hop": "10.0.0.63", + "local_pref": "0", + "originator_id": "0", + "community_list": "business", + "ext_community_list": "preferential_transit"}) + + expected_output = """\ +Total number of bmp bgp-rib-out-table: 2 +Neighbor_Address NLRI Origin AS_Path Origin_AS Next_Hop Local_Pref \ +Originator_ID Community_List Ext_Community_List +------------------ ---------------- -------- ----------- ----------- ---------- ------------ \ +--------------- ---------------- -------------------- +10.0.0.57 20c0:ef50::/64 igp 65100 64600 64915 fc00::7e 0 \ +0 residential traffic_engineering +10.0.0.59 192.181.168.0/25 igp 65100 64600 64915 10.0.0.63 0 \ +0 business preferential_transit +""" + result = runner.invoke(show.cli.commands['bmp'].commands['bgp-rib-out-table'], [], obj=db) + assert result.exit_code == 0 + resultA = result.output.strip().replace(' ', '').replace('\n', '') + resultB = expected_output.strip().replace(' ', '').replace('\n', '') + assert resultA == resultB + + def test_show_bmp_rib_in_table(self): + runner = CliRunner() + db = Db() + dbconnector = db.db + self.set_db_values(dbconnector, + "BGP_RIB_IN_TABLE|20c0:ef50::/64|10.0.0.57", + {"origin": "igp", + "as_path": "65100 64600", + "origin_as": "64915", + "next_hop": "fc00::7e", + "local_pref": "0", + "originator_id": "0", + "community_list": "residential", + "ext_community_list": "traffic_engineering"}) + self.set_db_values(dbconnector, + "BGP_RIB_IN_TABLE|192.181.168.0/25|10.0.0.59", + {"origin": "igp", + "as_path": "65100 64600", + "origin_as": "64915", + "next_hop": "10.0.0.63", + 
"local_pref": "0", + "originator_id": "0", + "community_list": "business", + "ext_community_list": "preferential_transit"}) + + expected_output = """\ +Total number of bmp bgp-rib-in-table: 2 +Neighbor_Address NLRI Origin AS_Path Origin_AS Next_Hop Local_Pref \ +Originator_ID Community_List Ext_Community_List +------------------ ---------------- -------- ----------- ----------- ---------- ------------ \ +--------------- ---------------- -------------------- +10.0.0.57 20c0:ef50::/64 igp 65100 64600 64915 fc00::7e 0 \ +0 residential traffic_engineering +10.0.0.59 192.181.168.0/25 igp 65100 64600 64915 10.0.0.63 0 \ +0 business preferential_transit +""" + result = runner.invoke(show.cli.commands['bmp'].commands['bgp-rib-in-table'], [], obj=db) + assert result.exit_code == 0 + resultA = result.output.strip().replace(' ', '').replace('\n', '') + resultB = expected_output.strip().replace(' ', '').replace('\n', '') + assert resultA == resultB + + def test_tables(self): + runner = CliRunner() + db = Db() + db.cfgdb.mod_entry("BMP", "table", {'bgp_neighbor_table': 'true'}) + db.cfgdb.mod_entry("BMP", "table", {'bgp_rib_in_table': 'false'}) + db.cfgdb.mod_entry("BMP", "table", {'bgp_rib_out_table': 'true'}) + + assert db.cfgdb.get_entry('BMP', 'table')['bgp_neighbor_table'] == 'true' + assert db.cfgdb.get_entry('BMP', 'table')['bgp_rib_in_table'] == 'false' + assert db.cfgdb.get_entry('BMP', 'table')['bgp_rib_out_table'] == 'true' + + expected_output = """\ +BMP tables: +Table_Name Enabled +------------------ --------- +bgp_neighbor_table true +bgp_rib_in_table false +bgp_rib_out_table true +""" + result = runner.invoke(show.cli.commands['bmp'].commands['tables'], [], obj=db) + assert result.exit_code == 0 + resultA = result.output.strip().replace(' ', '').replace('\n', '') + resultB = expected_output.strip().replace(' ', '').replace('\n', '') + assert resultA == resultB + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = 
os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" diff --git a/tests/show_ip_route_common.py b/tests/show_ip_route_common.py index 101b23309c..899915a1f4 100644 --- a/tests/show_ip_route_common.py +++ b/tests/show_ip_route_common.py @@ -875,3 +875,60 @@ Totals 6467 6466 """ + +SHOW_IP_ROUTE_REMOTE_LC = """\ +Codes: K - kernel route, C - connected, S - static, R - RIP, + O - OSPF, I - IS-IS, B - BGP, E - EIGRP, N - NHRP, + T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP, + F - PBR, f - OpenFabric, + > - selected route, * - FIB route, q - queued route, r - rejected route + +B> 0.0.0.0/0 [200/0] via 20.1.24.128, recursive via iBGP 04w0d12h + via 20.1.16.128, recursive via iBGP 04w0d12h + via 20.1.8.128, recursive via iBGP 04w0d12h + via 20.1.0.128, recursive via iBGP 04w0d12h +""" + +SHOW_IP_ROUTE_LC = """\ +Codes: K - kernel route, C - connected, S - static, R - RIP, + O - OSPF, I - IS-IS, B - BGP, E - EIGRP, N - NHRP, + T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP, + F - PBR, f - OpenFabric, + > - selected route, * - FIB route, q - queued route, r - rejected route + +B>*0.0.0.0/0 [20/0] via 20.1.24.128, PortChannel13, 04w0d11h + * via 20.1.16.128, PortChannel9, 04w0d11h + * via 20.1.8.128, PortChannel5, 04w0d11h + * via 20.1.0.128, PortChannel1, 04w0d11h +""" + +SHOW_IP_ROUTE_REMOTE_LC_DEFAULT_ROUTE = """\ +Routing entry for 0.0.0.0/0 + Known via "bgp", distance 200, metric 0, best + Last update 04w0d12h ago + * 20.1.24.128 recursive via iBGP + * 20.1.16.128 recursive via iBGP + * 20.1.8.128 recursive via iBGP + * 20.1.0.128 recursive via iBGP + +""" + +SHOW_IP_ROUTE_LC_DEFAULT_ROUTE = """\ +Routing entry for 0.0.0.0/0 + Known via "bgp", distance 20, metric 0, best + Last update 04w0d11h ago + * 20.1.24.128, via PortChannel13 + * 20.1.16.128, via PortChannel9 + * 20.1.8.128, via PortChannel5 + * 20.1.0.128, via PortChannel1 + +""" + +SHOW_IP_ROUTE_LC_DEFAULT_ROUTE_2 = """\ +Routing entry for 
0.0.0.0/0 + Known via "bgp", distance 20, metric 0, best + Last update 01:01:51 ago + * 10.0.0.7, via PortChannel106 + * 10.0.0.1, via PortChannel102 + +""" diff --git a/tests/show_test.py b/tests/show_test.py index ad88eeaaa2..819f197343 100644 --- a/tests/show_test.py +++ b/tests/show_test.py @@ -1070,6 +1070,20 @@ def test_rc_syslog(self, mock_rc): assert result.exit_code == 0 assert '[1.1.1.1]' in result.output + @patch('builtins.open', mock_open( + read_data=open('tests/ntp.conf').read())) + def test_ntp(self): + runner = CliRunner() + + result = runner.invoke( + show.cli.commands['runningconfiguration'].commands['ntp']) + print(result.exit_code) + print(result.output) + + assert result.exit_code == 0 + assert '10.1.1.1' in result.output + assert '10.22.1.12' in result.output + @classmethod def teardown_class(cls): print('TEARDOWN') diff --git a/tests/sonic_package_manager/conftest.py b/tests/sonic_package_manager/conftest.py index 98db887941..3d6beae9ff 100644 --- a/tests/sonic_package_manager/conftest.py +++ b/tests/sonic_package_manager/conftest.py @@ -412,7 +412,6 @@ def sonic_fs(fs): fs.create_dir(SERVICE_MGMT_SCRIPT_LOCATION) fs.create_file(GENERATED_SERVICES_CONF_FILE) fs.create_file(os.path.join(TEMPLATES_PATH, SERVICE_FILE_TEMPLATE)) - fs.create_file(os.path.join(TEMPLATES_PATH, TIMER_UNIT_TEMPLATE)) fs.create_file(os.path.join(TEMPLATES_PATH, SERVICE_MGMT_SCRIPT_TEMPLATE)) fs.create_file(os.path.join(TEMPLATES_PATH, DOCKER_CTL_SCRIPT_TEMPLATE)) fs.create_file(os.path.join(TEMPLATES_PATH, DEBUG_DUMP_SCRIPT_TEMPLATE)) diff --git a/tests/sonic_package_manager/test_service_creator.py b/tests/sonic_package_manager/test_service_creator.py index 8278a8da2b..319dcf32ff 100644 --- a/tests/sonic_package_manager/test_service_creator.py +++ b/tests/sonic_package_manager/test_service_creator.py @@ -137,20 +137,6 @@ def read_file(name): assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service.d')) -def 
test_service_creator_with_timer_unit(sonic_fs, manifest, service_creator): - entry = PackageEntry('test', 'azure/sonic-test') - package = Package(entry, Metadata(manifest)) - service_creator.create(package) - - assert not sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) - - manifest['service']['delayed'] = True - package = Package(entry, Metadata(manifest)) - service_creator.create(package) - - assert sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) - - def test_service_creator_with_debug_dump(sonic_fs, manifest, service_creator): entry = PackageEntry('test', 'azure/sonic-test') package = Package(entry, Metadata(manifest)) @@ -396,27 +382,6 @@ def test_feature_update(mock_sonic_db, manifest): ], any_order=True) -def test_feature_registration_with_timer(mock_sonic_db, manifest): - manifest['service']['delayed'] = True - mock_connector = Mock() - mock_connector.get_entry = Mock(return_value={}) - mock_sonic_db.get_connectors = Mock(return_value=[mock_connector]) - mock_sonic_db.get_initial_db_connector = Mock(return_value=mock_connector) - feature_registry = FeatureRegistry(mock_sonic_db) - feature_registry.register(manifest) - mock_connector.set_entry.assert_called_with('FEATURE', 'test', { - 'state': 'disabled', - 'auto_restart': 'enabled', - 'high_mem_alert': 'disabled', - 'set_owner': 'local', - 'has_per_asic_scope': 'False', - 'has_global_scope': 'True', - 'delayed': 'True', - 'check_up_status': 'False', - 'support_syslog_rate_limit': 'False', - }) - - def test_feature_registration_with_non_default_owner(mock_sonic_db, manifest): mock_connector = Mock() mock_connector.get_entry = Mock(return_value={}) diff --git a/tests/suppress_pending_fib_test.py b/tests/suppress_pending_fib_test.py new file mode 100644 index 0000000000..b4dcc7d4bc --- /dev/null +++ b/tests/suppress_pending_fib_test.py @@ -0,0 +1,34 @@ +from click.testing import CliRunner + +import config.main as config +import show.main as show +from utilities_common.db import Db + + 
+class TestSuppressFibPending: + def test_synchronous_mode(self): + runner = CliRunner() + + db = Db() + + result = runner.invoke(config.config.commands['suppress-fib-pending'], ['enabled'], obj=db) + print(result.output) + assert result.exit_code == 0 + assert db.cfgdb.get_entry('DEVICE_METADATA', 'localhost')['suppress-fib-pending'] == 'enabled' + + result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db) + assert result.exit_code == 0 + assert result.output == 'Enabled\n' + + result = runner.invoke(config.config.commands['suppress-fib-pending'], ['disabled'], obj=db) + print(result.output) + assert result.exit_code == 0 + assert db.cfgdb.get_entry('DEVICE_METADATA', 'localhost')['suppress-fib-pending'] == 'disabled' + + result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db) + assert result.exit_code == 0 + assert result.output == 'Disabled\n' + + result = runner.invoke(config.config.commands['suppress-fib-pending'], ['invalid-input'], obj=db) + print(result.output) + assert result.exit_code != 0 diff --git a/tests/watermarkstat_test.py b/tests/watermarkstat_test.py index dc419ae3b9..6a2ebfa2cf 100644 --- a/tests/watermarkstat_test.py +++ b/tests/watermarkstat_test.py @@ -1,11 +1,9 @@ import os import sys import pytest - import show.main as show from click.testing import CliRunner - -from .wm_input.wm_test_vectors import * +from wm_input.wm_test_vectors import testData test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -84,12 +82,14 @@ def executor(self, testcase): else: exec_cmd = show.cli.commands[input['cmd'][0]].commands[input['cmd'][1]] - result = runner.invoke(exec_cmd, []) + args = [] if 'args' not in input else input['args'] + result = runner.invoke(exec_cmd, args) print(result.exit_code) print(result.output) - assert result.exit_code == 0 + expected_code = 0 if 'rc' not in input else input['rc'] + assert result.exit_code == expected_code assert result.output == 
input['rc_output'] @classmethod diff --git a/tests/wm_input/wm_test_vectors.py b/tests/wm_input/wm_test_vectors.py index 93d9faa4cb..f0a80cf9cb 100644 --- a/tests/wm_input/wm_test_vectors.py +++ b/tests/wm_input/wm_test_vectors.py @@ -1,3 +1,373 @@ +show_pg_wm_shared_output_one_masic = ( + "Ingress shared pool occupancy per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + " Ethernet4 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n") + +show_pg_wm_shared_output_all_masic = ( + "Ingress shared pool occupancy per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- ----- " + "----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + " Ethernet4 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ingress shared pool occupancy per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 " + "PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + "Ethernet-BP260 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n") + +show_pg_wm_hdrm_output_one_masic = ( + "Ingress headroom per PG: (Namespace asic1)\n" + 
" Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + "Ethernet-BP260 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n") + +show_pg_wm_hdrm_output_all_masic = ( + "Ingress headroom per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + " Ethernet4 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + "Ethernet-BP260 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n") + +show_pg_persistent_wm_shared_output_one_masic = ( + "Ingress shared pool occupancy per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_pg_persistent_wm_shared_output_all_masic = ( + "Ingress 
shared pool occupancy per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + " Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ingress shared pool occupancy per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7" + " PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_pg_persistent_wm_hdrm_output_one_masic = ( + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_pg_persistent_wm_hdrm_output_all_masic = ( + "Ingress headroom per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 200 201 202 203 204 205 206 207 500" + " 501 502 503 504 505 506 507\n" + " Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A\n" + 
"Ethernet-BP0 0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0\n" + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 " + "PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- ----- " + "----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207 " + "500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A " + "N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_queue_wm_unicast_output_one_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ------- ----- ----- ----- ----- ----- ----- ----- +Ethernet-BP256 2057328 20 2 0 0 2 2 28 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_wm_unicast_output_all_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic0) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +------------ ----- ----- ----- ----- ----- ----- ----- ----- + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ------- ----- ----- ----- ----- ----- ----- ----- +Ethernet-BP256 2057328 20 2 0 0 2 2 28 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_unicast_output_one_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_unicast_output_all_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic0) + Port UC0 UC1 UC2 UC3 UC4 UC5 
UC6 UC7 +------------ ----- ----- ----- ----- ----- ----- ----- ----- + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_wm_multicast_output_one_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +""" + +show_queue_wm_multicast_output_all_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +Egress shared pool occupancy per multicast queue: (Namespace asic1) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +-------------- ----- ----- ------ ------- ------ ------ ------ ------ +Ethernet-BP256 2 0 5 2057328 208 20 228 2 +Ethernet-BP260 0 0 0 0 0 0 0 0 +""" + +show_queue_pwm_multicast_output_one_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +""" + 
+show_queue_pwm_multicast_output_all_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +Egress shared pool occupancy per multicast queue: (Namespace asic1) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +-------------- ----- ----- ------ ------ ------ ------ ------ ------ +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP260 0 0 0 0 0 0 0 0 +""" + +show_queue_wm_all_output_one_masic = """\ +Egress shared pool occupancy per all queues: (Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_wm_all_output_all_masic = """\ +Egress shared pool occupancy per all queues: (Namespace asic0) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +------------ ------ ------ ------- ------- ------- ------- ------- ------- + Ethernet0 0 0 0 0 0 0 0 0 + Ethernet4 0 0 0 0 0 0 0 0 +Ethernet-BP0 0 0 0 0 0 0 0 0 +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per all queues: (Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_all_output_one_masic = """\ +Egress shared pool occupancy per all queues: (Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + 
+show_queue_pwm_all_output_all_masic = """\ +Egress shared pool occupancy per all queues: (Namespace asic0) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +------------ ------ ------ ------- ------- ------- ------- ------- ------- + Ethernet0 0 0 0 0 0 0 0 0 + Ethernet4 0 0 0 0 0 0 0 0 +Ethernet-BP0 0 0 0 0 0 0 0 0 +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per all queues: (Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_buffer_pool_wm_output_one_masic = """\ +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 3000 +""" + +show_buffer_pool_wm_output_all_masic = """\ +Shared pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 3000 +""" + +show_buffer_pool_pwm_output_one_masic = """\ +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +show_buffer_pool_pwm_output_all_masic = """\ +Shared pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +show_hdrm_pool_wm_output_one_masic = """\ +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 432640 +""" + +show_hdrm_pool_wm_output_all_masic = """\ +Headroom pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes 
+--------------------- ------- +ingress_lossless_pool 432640 +""" + +show_hdrm_pool_pwm_output_one_masic = """\ +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +show_hdrm_pool_pwm_output_all_masic = """\ +Headroom pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +clear_hdrm_pool_wm_output_one_masic = """\ +Channel: WATERMARK_CLEAR_REQUEST accessed in namespace: asic0 +Message published to WATERMARK_CLEAR_REQUEST: ["USER","PG_HEADROOM"] +""" + +clear_hdrm_pool_wm_output_all_masic = """\ +Channel: WATERMARK_CLEAR_REQUEST accessed in namespace: asic0 +Message published to WATERMARK_CLEAR_REQUEST: ["USER","PG_HEADROOM"] +Channel: WATERMARK_CLEAR_REQUEST accessed in namespace: asic1 +Message published to WATERMARK_CLEAR_REQUEST: ["USER","PG_HEADROOM"] +""" + show_pg_wm_shared_output="""\ Ingress shared pool occupancy per PG: Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 @@ -124,56 +494,198 @@ 'rc_output': show_pg_wm_hdrm_output } ], - 'show_pg_pwm_shared' : [ {'cmd' : ['priority-group', 'persistent-watermark', 'shared'], - 'rc_output': show_pg_persistent_wm_shared_output - } - ], - 'show_pg_pwm_hdrm' : [ {'cmd' : ['priority-group', 'persistent-watermark', 'headroom'], - 'rc_output': show_pg_persistent_wm_hdrm_output - } - ], - 'show_q_wm_unicast' : [ {'cmd' : ['queue', 'watermark', 'unicast'], - 'rc_output': show_queue_wm_unicast_output + 'show_pg_pwm_shared': [{'cmd': ['priority-group', 'persistent-watermark', 'shared'], + 'rc_output': show_pg_persistent_wm_shared_output } ], - 'show_q_pwm_unicast' : [ {'cmd' : ['queue', 'persistent-watermark', 'unicast'], - 'rc_output': show_queue_pwm_unicast_output - } + 'show_pg_pwm_hdrm': [{'cmd': ['priority-group', 'persistent-watermark', 'headroom'], + 'rc_output': 
show_pg_persistent_wm_hdrm_output + } + ], + 'show_q_wm_unicast': [{'cmd': ['queue', 'watermark', 'unicast'], + 'rc_output': show_queue_wm_unicast_output + } + ], + 'show_q_pwm_unicast': [{'cmd': ['queue', 'persistent-watermark', 'unicast'], + 'rc_output': show_queue_pwm_unicast_output + } + ], + 'show_q_wm_multicast': [{'cmd': ['queue', 'watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_output + } ], - 'show_q_wm_multicast' : [ {'cmd' : ['queue', 'watermark', 'multicast'], - 'rc_output': show_queue_wm_multicast_output - } - ], - 'show_q_wm_multicast_neg' : [ { 'cmd' : ['queue', 'watermark', 'multicast'], - 'rc_output': show_queue_wm_multicast_neg_output - } + 'show_q_wm_multicast_neg': [{'cmd': ['queue', 'watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_neg_output + } ], - 'show_q_pwm_multicast' : [ {'cmd' : ['queue', 'persistent-watermark', 'multicast'], - 'rc_output': show_queue_wm_multicast_output - } - ], - 'show_q_wm_all' : [ {'cmd' : ['queue', 'watermark', 'all'], - 'rc_output': show_queue_wm_all_output - } - ], - 'show_q_pwm_all' : [ {'cmd' : ['queue', 'persistent-watermark', 'all'], - 'rc_output': show_queue_pwm_all_output - } - ], - 'show_buffer_pool_wm' : [ {'cmd' : ['buffer_pool', 'watermark'], - 'rc_output': show_buffer_pool_wm_output - } + 'show_q_pwm_multicast': [{'cmd': ['queue', 'persistent-watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_output + } ], - 'show_buffer_pool_pwm' : [ {'cmd' : ['buffer_pool', 'persistent-watermark'], - 'rc_output': show_buffer_pool_persistent_wm_output - } - ], - 'show_hdrm_pool_wm' : [ {'cmd' : ['headroom-pool', 'watermark'], - 'rc_output': show_hdrm_pool_wm_output + 'show_q_wm_all': [{'cmd': ['queue', 'watermark', 'all'], + 'rc_output': show_queue_wm_all_output + } + ], + 'show_q_pwm_all': [{'cmd': ['queue', 'persistent-watermark', 'all'], + 'rc_output': show_queue_pwm_all_output + } + ], + 'show_buffer_pool_wm': [{'cmd': ['buffer_pool', 'watermark'], + 'rc_output': 
show_buffer_pool_wm_output } - ], - 'show_hdrm_pool_pwm' : [ {'cmd' : ['headroom-pool', 'persistent-watermark'], - 'rc_output': show_hdrm_pool_persistent_wm_output + ], + 'show_buffer_pool_pwm': [{'cmd': ['buffer_pool', 'persistent-watermark'], + 'rc_output': show_buffer_pool_persistent_wm_output } - ] + ], + 'show_hdrm_pool_wm': [{'cmd': ['headroom-pool', 'watermark'], + 'rc_output': show_hdrm_pool_wm_output + } + ], + 'show_hdrm_pool_pwm': [{'cmd': ['headroom-pool', 'persistent-watermark'], + 'rc_output': show_hdrm_pool_persistent_wm_output + } + ], + 'show_pg_wm_shared_one_masic': [{'cmd': ['priority-group', 'watermark', 'shared'], + 'args': ['--namespace', 'asic0'], + 'rc_output': show_pg_wm_shared_output_one_masic + } + ], + 'show_pg_wm_shared_all_masic': [{'cmd': ['priority-group', 'watermark', 'shared'], + 'rc_output': show_pg_wm_shared_output_all_masic + } + ], + 'show_pg_wm_hdrm_one_masic': [{'cmd': ['priority-group', 'watermark', 'headroom'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_pg_wm_hdrm_output_one_masic + } + ], + 'show_pg_wm_hdrm_all_masic': [{'cmd': ['priority-group', 'watermark', 'headroom'], + 'rc_output': show_pg_wm_hdrm_output_all_masic + } + ], + 'show_pg_pwm_shared_one_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'shared'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_pg_persistent_wm_shared_output_one_masic + } + ], + 'show_pg_pwm_shared_all_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'shared'], + 'rc_output': show_pg_persistent_wm_shared_output_all_masic + } + ], + 'show_pg_pwm_hdrm_one_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'headroom'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_pg_persistent_wm_hdrm_output_one_masic + } + ], + 'show_pg_pwm_hdrm_all_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'headroom'], + 'rc_output': show_pg_persistent_wm_hdrm_output_all_masic + } + ], + 'show_q_wm_unicast_one_masic': [{'cmd': ['queue', 
'watermark', 'unicast'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_wm_unicast_output_one_masic + } + ], + 'show_q_wm_unicast_all_masic': [{'cmd': ['queue', 'watermark', 'unicast'], + 'rc_output': show_queue_wm_unicast_output_all_masic + } + ], + 'show_q_pwm_unicast_one_masic': [{'cmd': ['queue', 'persistent-watermark', 'unicast'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_pwm_unicast_output_one_masic + } + ], + 'show_q_pwm_unicast_all_masic': [{'cmd': ['queue', 'persistent-watermark', 'unicast'], + 'rc_output': show_queue_pwm_unicast_output_all_masic + } + ], + 'show_q_wm_multicast_one_masic': [{'cmd': ['queue', 'watermark', 'multicast'], + 'args': ['--namespace', 'asic0'], + 'rc_output': show_queue_wm_multicast_output_one_masic + } + ], + 'show_q_wm_multicast_all_masic': [{'cmd': ['queue', 'watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_output_all_masic + } + ], + 'show_q_pwm_multicast_one_masic': [{'cmd': ['queue', 'persistent-watermark', 'multicast'], + 'args': ['--namespace', 'asic0'], + 'rc_output': show_queue_pwm_multicast_output_one_masic + } + ], + 'show_q_pwm_multicast_all_masic': [{'cmd': ['queue', 'persistent-watermark', 'multicast'], + 'rc_output': show_queue_pwm_multicast_output_all_masic + } + ], + 'show_q_wm_all_one_masic': [{'cmd': ['queue', 'watermark', 'all'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_wm_all_output_one_masic + } + ], + 'show_q_wm_all_all_masic': [{'cmd': ['queue', 'watermark', 'all'], + 'rc_output': show_queue_wm_all_output_all_masic + } + ], + 'show_q_pwm_all_one_masic': [{'cmd': ['queue', 'persistent-watermark', 'all'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_pwm_all_output_one_masic + } + ], + 'show_q_pwm_all_all_masic': [{'cmd': ['queue', 'persistent-watermark', 'all'], + 'rc_output': show_queue_pwm_all_output_all_masic + } + ], + 'show_buffer_pool_wm_one_masic': [{'cmd': ['buffer_pool', 'watermark'], + 'args': ['--namespace', 
'asic1'], + 'rc_output': show_buffer_pool_wm_output_one_masic + } + ], + 'show_buffer_pool_wm_all_masic': [{'cmd': ['buffer_pool', 'watermark'], + 'rc_output': show_buffer_pool_wm_output_all_masic + } + ], + 'show_buffer_pool_pwm_one_masic': [{'cmd': ['buffer_pool', 'persistent-watermark'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_buffer_pool_pwm_output_one_masic + } + ], + 'show_buffer_pool_pwm_all_masic': [{'cmd': ['buffer_pool', 'persistent-watermark'], + 'rc_output': show_buffer_pool_pwm_output_all_masic + } + ], + 'show_hdrm_pool_wm_one_masic': [{'cmd': ['headroom-pool', 'watermark'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_hdrm_pool_wm_output_one_masic + } + ], + 'show_hdrm_pool_wm_all_masic': [{'cmd': ['headroom-pool', 'watermark'], + 'rc_output': show_hdrm_pool_wm_output_all_masic + } + ], + 'show_hdrm_pool_pwm_one_masic': [{'cmd': ['headroom-pool', 'persistent-watermark'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_hdrm_pool_pwm_output_one_masic + } + ], + 'show_hdrm_pool_pwm_all_masic': [{'cmd': ['headroom-pool', 'persistent-watermark'], + 'rc_output': show_hdrm_pool_pwm_output_all_masic + } + ], + 'show_invalid_namespace_masic': [{'cmd': ['buffer_pool', 'watermark'], + 'args': ['--namespace', 'asic14'], + 'rc': 2, + 'rc_output': '' + } + ], + 'clear_hdrm_pool_wm_one_masic': [{'cmd': ['clear', 'watermarkstat', '-t', + 'pg_headroom', '-n', 'asic0', '-c'], + 'rc_output': clear_hdrm_pool_wm_output_one_masic + } + ], + 'clear_hdrm_pool_wm_all_masic': [{'cmd': ['clear', 'watermarkstat', '-t', + 'pg_headroom', '-c'], + 'rc_output': clear_hdrm_pool_wm_output_all_masic + } + ] } diff --git a/tests/wol_test.py b/tests/wol_test.py deleted file mode 100644 index 011676eeac..0000000000 --- a/tests/wol_test.py +++ /dev/null @@ -1,229 +0,0 @@ -import click -import io -import pytest -import wol.main as wol -from click.testing import CliRunner -from unittest.mock import patch, MagicMock - -ETHER_TYPE_WOL = b'\x08\x42' 
-BROADCAST_MAC = wol.MacAddress('ff:ff:ff:ff:ff:ff') - -SAMPLE_INTERFACE_ETH0 = "Ethernet0" -SAMPLE_INTERFACE_VLAN1000 = "Vlan1000" -SAMPLE_INTERFACE_PO100 = "PortChannel100" - -SAMPLE_ETH0_MAC = wol.MacAddress('11:33:55:77:99:bb') -SAMPLE_VLAN1000_MAC = wol.MacAddress('22:44:66:88:aa:cc') -SAMPLE_PO100_MAC = wol.MacAddress('33:55:77:99:bb:dd') -SAMPLE_TARGET_MAC = wol.MacAddress('44:66:88:aa:cc:ee') -SAMPLE_TARGET_MAC_LIST = [wol.MacAddress('44:66:88:aa:cc:ee'), wol.MacAddress('55:77:99:bb:dd:ff')] - -SAMPLE_MAGIC_PACKET_UNICAST = SAMPLE_TARGET_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 -SAMPLE_MAGIC_PACKET_BROADCAST = BROADCAST_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 - - -class TestMacAddress(): - def test_init(self): - # Test Case 1: Test with a valid MAC address - assert wol.MacAddress('00:11:22:33:44:55').address == b'\x00\x11\x22\x33\x44\x55' - # Test Case 2: Test with an invalid MAC address - with pytest.raises(ValueError) as exc_info: - wol.MacAddress('INVALID_MAC_ADDRESS') - assert exc_info.value.message == "invalid MAC address" - with pytest.raises(ValueError) as exc_info: - wol.MacAddress('00:11:22:33:44') - assert exc_info.value.message == "invalid MAC address" - - def test_str(self): - assert str(wol.MacAddress('00:01:0a:a0:aa:ee')) == '00:01:0a:a0:aa:ee' - assert str(wol.MacAddress('ff:ff:ff:ff:ff:ff')) == 'ff:ff:ff:ff:ff:ff' - - def test_eq(self): - # Test Case 1: Test with two equal MAC addresses - assert wol.MacAddress('00:11:22:33:44:55') == wol.MacAddress('00:11:22:33:44:55') - # Test Case 2: Test with two unequal MAC addresses - assert wol.MacAddress('00:11:22:33:44:55') != wol.MacAddress('55:44:33:22:11:00') - - def test_to_bytes(self): - assert wol.MacAddress('00:11:22:33:44:55').to_bytes() == b'\x00\x11\x22\x33\x44\x55' - - -@patch('wol.main.get_interface_mac', MagicMock(return_value=SAMPLE_ETH0_MAC)) -def 
test_build_magic_packet(): - # Test Case 1: Test build magic packet basic - expected_output = SAMPLE_TARGET_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL \ - + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 - assert wol.build_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, broadcast=False, password=b'') == expected_output - # Test Case 2: Test build magic packet with broadcast flag - expected_output = BROADCAST_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL \ - + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 - assert wol.build_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, broadcast=True, password=b'') == expected_output - # Test Case 3: Test build magic packet with 4-byte password - password = b'\x12\x34' - expected_output = SAMPLE_TARGET_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL \ - + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 + password - assert wol.build_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, broadcast=False, password=password) == expected_output - # Test Case 4: Test build magic packet with 6-byte password - password = b'\x12\x34\x56\x78\x9a\xbc' - expected_output = SAMPLE_TARGET_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL \ - + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 + password - assert wol.build_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, broadcast=False, password=password) == expected_output - - -def test_send_magic_packet(): - # Test Case 1: Test send magic packet with count is 1 - with patch('socket.socket') as mock_socket: - wol.send_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, count=1, interval=0, verbose=False) - mock_socket.return_value.bind.assert_called_once_with((SAMPLE_INTERFACE_ETH0, 0)) - mock_socket.return_value.send.assert_called_once_with(SAMPLE_MAGIC_PACKET_UNICAST) - # Test Case 2: Test send magic packet with count is 3 - with patch('socket.socket') as mock_socket: - 
wol.send_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, count=3, interval=0, verbose=False) - assert mock_socket.return_value.bind.call_count == 1 - assert mock_socket.return_value.send.call_count == 3 - # Test Case 3: Test send magic packet with interval is 1000 - with patch('socket.socket') as mock_socket, \ - patch('time.sleep') as mock_sleep: - wol.send_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, count=3, interval=1000, verbose=False) - assert mock_socket.return_value.bind.call_count == 1 - assert mock_socket.return_value.send.call_count == 3 - assert mock_sleep.call_count == 2 # sleep twice between 3 packets - mock_sleep.assert_called_with(1) - # Test Case 4: Test send magic packet with verbose is True - expected_verbose_output = f"Sending 5 magic packet to {SAMPLE_TARGET_MAC} via interface {SAMPLE_INTERFACE_ETH0}\n" + \ - f"1st magic packet sent to {SAMPLE_TARGET_MAC}\n" + \ - f"2nd magic packet sent to {SAMPLE_TARGET_MAC}\n" + \ - f"3rd magic packet sent to {SAMPLE_TARGET_MAC}\n" + \ - f"4th magic packet sent to {SAMPLE_TARGET_MAC}\n" + \ - f"5th magic packet sent to {SAMPLE_TARGET_MAC}\n" - with patch('socket.socket') as mock_socket, patch('time.sleep'), patch('sys.stdout', new_callable=io.StringIO) as mock_stdout: - wol.send_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, count=5, interval=1000, verbose=True) - assert mock_socket.return_value.bind.call_count == 1 - assert mock_socket.return_value.send.call_count == 5 - assert mock_stdout.getvalue() == expected_verbose_output - - -@patch('netifaces.interfaces', MagicMock(return_value=[SAMPLE_INTERFACE_ETH0])) -@patch('wol.main.get_interface_operstate', MagicMock(return_value="up")) -def test_validate_interface(): - # Test Case 1: Test with a valid SONiC interface name - assert wol.validate_interface(None, None, SAMPLE_INTERFACE_ETH0) == SAMPLE_INTERFACE_ETH0 - # Test Case 2: Test with an invalid 
SONiC interface name - with pytest.raises(click.BadParameter) as exc_info: - wol.validate_interface(None, None, "INVALID_SONIC_INTERFACE") - assert exc_info.value.message == "invalid SONiC interface name INVALID_SONIC_INTERFACE" - # Test Case 3: Test with an valid SONiC interface name, but the interface operstat is down - with patch('wol.main.get_interface_operstate', MagicMock(return_value="down")): - with pytest.raises(click.BadParameter) as exc_info: - wol.validate_interface(None, None, SAMPLE_INTERFACE_ETH0) - assert exc_info.value.message == f"interface {SAMPLE_INTERFACE_ETH0} is not up" - - -def test_parse_target_mac(): - # Test Case 1: Test with a single valid target MAC address - wol.parse_target_mac(None, None, str(SAMPLE_TARGET_MAC)) == [SAMPLE_TARGET_MAC] - # Test Case 2: Test with a list of valid target MAC addresses - mac_list = [SAMPLE_ETH0_MAC, SAMPLE_VLAN1000_MAC, SAMPLE_PO100_MAC] - assert wol.parse_target_mac(None, None, ",".join([str(x) for x in mac_list])) == mac_list - # Test Case 3: Test with a single invalid target MAC address - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_target_mac(None, None, "INVALID_MAC_ADDRESS") - assert exc_info.value.message == "invalid MAC address INVALID_MAC_ADDRESS" - # Test Case 4: Test with a list of target MAC addresses, one of them is invalid - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_target_mac(None, None, ",".join([str(SAMPLE_ETH0_MAC), "INVALID_MAC_ADDRESS"])) - assert exc_info.value.message == "invalid MAC address INVALID_MAC_ADDRESS" - - -def test_parse_password(): - # Test Case 1: Test with an empty password - assert wol.parse_password(None, None, "") == b'' - # Test Case 2: Test with a valid 4-byte password - assert wol.parse_password(None, None, "1.2.3.4") == b'\x01\x02\x03\x04' - # Test Case 3: Test with an invalid 4-byte password - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_password(None, None, "1.2.3.999") - assert exc_info.value.message 
== "invalid password 1.2.3.999" - # Test Case 4: Test with a valid 6-byte password - assert wol.parse_password(None, None, str(SAMPLE_TARGET_MAC)) == SAMPLE_TARGET_MAC.to_bytes() - # Test Case 5: Test with an invalid 6-byte password - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_password(None, None, "11:22:33:44:55:999") - assert exc_info.value.message == "invalid password 11:22:33:44:55:999" - # Test Case 6: Test with an invalid password string - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_password(None, None, "INVALID_PASSWORD") - assert exc_info.value.message == "invalid password INVALID_PASSWORD" - - -def test_validate_count_interval(): - # Test Case 1: input valid count and interval - assert wol.validate_count_interval(1, 1000) == (1, 1000) - # Test Case 2: Test with both count and interval are not provided - assert wol.validate_count_interval(None, None) == (1, 0) - # Test Case 3: Test count and interval not provided together - with pytest.raises(click.BadParameter) as exc_info: - wol.validate_count_interval(3, None) - assert exc_info.value.message == "count and interval must be used together" - with pytest.raises(click.BadParameter) as exc_info: - wol.validate_count_interval(None, 1000) - assert exc_info.value.message == "count and interval must be used together" - # Test Case 4: Test with count or interval not in valid range - # This restriction is validated by click.IntRange(), so no need to call the command line function - runner = CliRunner() - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC), '-c', '100', '-i', '1000']) - assert 'Invalid value for "-c": 100 is not in the valid range of 1 to 5.' in result.stdout - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC), '-c', '3', '-i', '100000']) - assert 'Invalid value for "-i": 100000 is not in the valid range of 0 to 2000.' 
in result.stdout - - -@patch('netifaces.interfaces', MagicMock(return_value=[SAMPLE_INTERFACE_ETH0])) -@patch('wol.main.is_root', MagicMock(return_value=True)) -@patch('wol.main.get_interface_operstate', MagicMock(return_value="up")) -@patch('wol.main.get_interface_mac', MagicMock(return_value=SAMPLE_ETH0_MAC)) -def test_wol_send_magic_packet_call_count(): - """ - Test the count of send_magic_packet() function call in wol is correct. - """ - runner = CliRunner() - # Test Case 1: Test with only required arguments - # 1.1 Single Target Mac - with patch('wol.main.send_magic_packet') as mock_send_magic_packet: - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC)]) - assert result.exit_code == 0 - mock_send_magic_packet.assert_called_once_with(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, 1, 0, False) - # 1.2 Multiple Target Mac - with patch('wol.main.send_magic_packet') as mock_send_magic_packet: - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, ','.join([str(v) for v in SAMPLE_TARGET_MAC_LIST])]) - assert result.exit_code == 0 - assert mock_send_magic_packet.call_count == 2 - # Test Case 2: Test with specified count and interval - # 2.1 Single Target Mac - with patch('wol.main.send_magic_packet') as mock_send_magic_packet: - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC), '-c', '5', '-i', '1000']) - assert result.exit_code == 0 - mock_send_magic_packet.assert_called_once_with(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, 5, 1000, False) - # 2.2 Multiple Target Mac - with patch('wol.main.send_magic_packet') as mock_send_magic_packet: - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, ','.join([str(v) for v in SAMPLE_TARGET_MAC_LIST]), '-c', '5', '-i', '1000']) - assert result.exit_code == 0 - assert mock_send_magic_packet.call_count == 2 - - -@patch('netifaces.interfaces', MagicMock(return_value=[SAMPLE_INTERFACE_ETH0])) 
-@patch('wol.main.is_root', MagicMock(return_value=True)) -@patch('wol.main.get_interface_operstate', MagicMock(return_value="up")) -@patch('wol.main.get_interface_mac', MagicMock(return_value=SAMPLE_ETH0_MAC)) -def test_wol_send_magic_packet_throw_exception(): - """ - Test the exception handling of send_magic_packet() function in wol. - """ - runner = CliRunner() - # Test Case 1: Test with OSError exception (interface flap) - with patch('wol.main.send_magic_packet', MagicMock(side_effect=OSError("[Errno 100] Network is down"))): - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC)]) - assert "Exception: [Errno 100] Network is down" in result.stdout - # Test Case 2: Test with other exception - with patch('wol.main.send_magic_packet', MagicMock(side_effect=Exception("Exception message"))): - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC)]) - assert "Exception: Exception message" in result.stdout diff --git a/utilities_common/flock.py b/utilities_common/flock.py new file mode 100644 index 0000000000..c8faa8bfd9 --- /dev/null +++ b/utilities_common/flock.py @@ -0,0 +1,89 @@ +"""File lock utilities.""" +import click +import fcntl +import functools +import inspect +import os +import sys +import time + +from sonic_py_common import logger + + +log = logger.Logger() + + +def acquire_flock(fd, timeout=-1): + """Acquire the flock.""" + flags = fcntl.LOCK_EX + if timeout >= 0: + flags |= fcntl.LOCK_NB + else: + timeout = 0 + + start_time = current_time = time.time() + ret = False + while current_time - start_time <= timeout: + try: + fcntl.flock(fd, flags) + except (IOError, OSError): + ret = False + else: + ret = True + break + current_time = time.time() + if timeout != 0: + time.sleep(0.2) + return ret + + +def release_flock(fd): + """Release the flock.""" + fcntl.flock(fd, fcntl.LOCK_UN) + + +def try_lock(lock_file, timeout=-1): + """Decorator to try lock file using fcntl.flock.""" + def _decorator(func): + 
@functools.wraps(func) + def _wrapper(*args, **kwargs): + bypass_lock = False + + # Get the bypass_lock argument from the function signature + func_signature = inspect.signature(func) + has_bypass_lock = "bypass_lock" in func_signature.parameters + if has_bypass_lock: + func_ba = func_signature.bind(*args, **kwargs) + func_ba.apply_defaults() + bypass_lock = func_ba.arguments["bypass_lock"] + + if bypass_lock: + click.echo(f"Bypass lock on {lock_file}") + return func(*args, **kwargs) + else: + fd = os.open(lock_file, os.O_CREAT | os.O_RDWR) + if acquire_flock(fd, timeout): + click.echo(f"Acquired lock on {lock_file}") + os.truncate(fd, 0) + # Write pid and the function name to the lock file as a record + os.write(fd, f"{func.__name__}, pid {os.getpid()}\n".encode()) + try: + return func(*args, **kwargs) + finally: + release_flock(fd) + click.echo(f"Released lock on {lock_file}") + os.truncate(fd, 0) + os.close(fd) + else: + click.echo(f"Failed to acquire lock on {lock_file}") + lock_owner = os.read(fd, 1024).decode() + if not lock_owner: + lock_owner = "unknown" + log.log_notice( + (f"{func.__name__} failed to acquire lock on {lock_file}," + f" which is taken by {lock_owner}") + ) + os.close(fd) + sys.exit(1) + return _wrapper + return _decorator diff --git a/utilities_common/multi_asic.py b/utilities_common/multi_asic.py index b1f24e12e8..4ebd728031 100644 --- a/utilities_common/multi_asic.py +++ b/utilities_common/multi_asic.py @@ -3,7 +3,6 @@ import click import netifaces -import pyroute2 from natsort import natsorted from sonic_py_common import multi_asic, device_info from utilities_common import constants @@ -170,6 +169,7 @@ def multi_asic_args(parser=None): return parser def multi_asic_get_ip_intf_from_ns(namespace): + import pyroute2 if namespace != constants.DEFAULT_NAMESPACE: pyroute2.netns.pushns(namespace) interfaces = natsorted(netifaces.interfaces()) @@ -181,6 +181,7 @@ def multi_asic_get_ip_intf_from_ns(namespace): def 
multi_asic_get_ip_intf_addr_from_ns(namespace, iface): + import pyroute2 if namespace != constants.DEFAULT_NAMESPACE: pyroute2.netns.pushns(namespace) ipaddresses = netifaces.ifaddresses(iface) diff --git a/utilities_common/netstat.py b/utilities_common/netstat.py index 5f17c1f4c6..21b1a0faeb 100755 --- a/utilities_common/netstat.py +++ b/utilities_common/netstat.py @@ -118,3 +118,12 @@ def format_util(brate, port_rate): util = brate/(float(port_rate)*1000*1000/8.0)*100 return "{:.2f}%".format(util) + +def format_util_directly(util): + """ + Format the util without calculation. + """ + if util == STATUS_NA: + return STATUS_NA + else: + return "{:.2f}%".format(float(util)) diff --git a/utilities_common/portstat.py b/utilities_common/portstat.py new file mode 100644 index 0000000000..6942fa5f2a --- /dev/null +++ b/utilities_common/portstat.py @@ -0,0 +1,666 @@ +import datetime +import time +from collections import OrderedDict, namedtuple + +from natsort import natsorted +from tabulate import tabulate +from sonic_py_common import multi_asic +from sonic_py_common import device_info +from swsscommon.swsscommon import SonicV2Connector, CounterTable, PortCounter + +from utilities_common import constants +import utilities_common.multi_asic as multi_asic_util +from utilities_common.netstat import ns_diff, table_as_json, format_brate, format_prate, \ + format_util, format_number_with_comma, format_util_directly + +""" +The order and count of statistics mentioned below needs to be in sync with the values in portstat script +So, any fields added/deleted in here should be reflected in portstat script also +""" +NStats = namedtuple("NStats", "rx_ok, rx_err, rx_drop, rx_ovr, tx_ok,\ + tx_err, tx_drop, tx_ovr, rx_byt, tx_byt,\ + rx_64, rx_65_127, rx_128_255, rx_256_511, rx_512_1023,\ + rx_1024_1518, rx_1519_2047, rx_2048_4095, rx_4096_9216, rx_9217_16383,\ + rx_uca, rx_mca, rx_bca, rx_all,\ + tx_64, tx_65_127, tx_128_255, tx_256_511, tx_512_1023, tx_1024_1518,\ + tx_1519_2047, 
tx_2048_4095, tx_4096_9216, tx_9217_16383,\ + tx_uca, tx_mca, tx_bca, tx_all,\ + rx_jbr, rx_frag, rx_usize, rx_ovrrun,\ + fec_corr, fec_uncorr, fec_symbol_err") +header_all = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', + 'TX_OK', 'TX_BPS', 'TX_PPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] +header_std = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', + 'TX_OK', 'TX_BPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] +header_errors_only = ['IFACE', 'STATE', 'RX_ERR', 'RX_DRP', 'RX_OVR', 'TX_ERR', 'TX_DRP', 'TX_OVR'] +header_fec_only = ['IFACE', 'STATE', 'FEC_CORR', 'FEC_UNCORR', 'FEC_SYMBOL_ERR'] +header_rates_only = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_OK', 'TX_BPS', 'TX_PPS', 'TX_UTIL'] + +rates_key_list = ['RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_BPS', 'TX_PPS', 'TX_UTIL'] +ratestat_fields = ("rx_bps", "rx_pps", "rx_util", "tx_bps", "tx_pps", "tx_util") +RateStats = namedtuple("RateStats", ratestat_fields) + +""" +The order and count of statistics mentioned below needs to be in sync with the values in portstat script +So, any fields added/deleted in here should be reflected in portstat script also +""" +BUCKET_NUM = 45 +counter_bucket_dict = { + 0: ['SAI_PORT_STAT_IF_IN_UCAST_PKTS', 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS'], + 1: ['SAI_PORT_STAT_IF_IN_ERRORS'], + 2: ['SAI_PORT_STAT_IF_IN_DISCARDS'], + 3: ['SAI_PORT_STAT_ETHER_RX_OVERSIZE_PKTS'], + 4: ['SAI_PORT_STAT_IF_OUT_UCAST_PKTS', 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS'], + 5: ['SAI_PORT_STAT_IF_OUT_ERRORS'], + 6: ['SAI_PORT_STAT_IF_OUT_DISCARDS'], + 7: ['SAI_PORT_STAT_ETHER_TX_OVERSIZE_PKTS'], + 8: ['SAI_PORT_STAT_IF_IN_OCTETS'], + 9: ['SAI_PORT_STAT_IF_OUT_OCTETS'], + 10: ['SAI_PORT_STAT_ETHER_IN_PKTS_64_OCTETS'], + 11: ['SAI_PORT_STAT_ETHER_IN_PKTS_65_TO_127_OCTETS'], + 12: ['SAI_PORT_STAT_ETHER_IN_PKTS_128_TO_255_OCTETS'], + 13: ['SAI_PORT_STAT_ETHER_IN_PKTS_256_TO_511_OCTETS'], + 14: 
['SAI_PORT_STAT_ETHER_IN_PKTS_512_TO_1023_OCTETS'], + 15: ['SAI_PORT_STAT_ETHER_IN_PKTS_1024_TO_1518_OCTETS'], + 16: ['SAI_PORT_STAT_ETHER_IN_PKTS_1519_TO_2047_OCTETS'], + 17: ['SAI_PORT_STAT_ETHER_IN_PKTS_2048_TO_4095_OCTETS'], + 18: ['SAI_PORT_STAT_ETHER_IN_PKTS_4096_TO_9216_OCTETS'], + 19: ['SAI_PORT_STAT_ETHER_IN_PKTS_9217_TO_16383_OCTETS'], + 20: ['SAI_PORT_STAT_IF_IN_UCAST_PKTS'], + 21: ['SAI_PORT_STAT_IF_IN_MULTICAST_PKTS'], + 22: ['SAI_PORT_STAT_IF_IN_BROADCAST_PKTS'], + 23: ['SAI_PORT_STAT_IF_IN_UCAST_PKTS', 'SAI_PORT_STAT_IF_IN_MULTICAST_PKTS', + 'SAI_PORT_STAT_IF_IN_BROADCAST_PKTS'], + 24: ['SAI_PORT_STAT_ETHER_OUT_PKTS_64_OCTETS'], + 25: ['SAI_PORT_STAT_ETHER_OUT_PKTS_65_TO_127_OCTETS'], + 26: ['SAI_PORT_STAT_ETHER_OUT_PKTS_128_TO_255_OCTETS'], + 27: ['SAI_PORT_STAT_ETHER_OUT_PKTS_256_TO_511_OCTETS'], + 28: ['SAI_PORT_STAT_ETHER_OUT_PKTS_512_TO_1023_OCTETS'], + 29: ['SAI_PORT_STAT_ETHER_OUT_PKTS_1024_TO_1518_OCTETS'], + 30: ['SAI_PORT_STAT_ETHER_OUT_PKTS_1519_TO_2047_OCTETS'], + 31: ['SAI_PORT_STAT_ETHER_OUT_PKTS_2048_TO_4095_OCTETS'], + 32: ['SAI_PORT_STAT_ETHER_OUT_PKTS_4096_TO_9216_OCTETS'], + 33: ['SAI_PORT_STAT_ETHER_OUT_PKTS_9217_TO_16383_OCTETS'], + 34: ['SAI_PORT_STAT_IF_OUT_UCAST_PKTS'], + 35: ['SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS'], + 36: ['SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS'], + 37: ['SAI_PORT_STAT_IF_OUT_UCAST_PKTS', 'SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS', + 'SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS'], + 38: ['SAI_PORT_STAT_ETHER_STATS_JABBERS'], + 39: ['SAI_PORT_STAT_ETHER_STATS_FRAGMENTS'], + 40: ['SAI_PORT_STAT_ETHER_STATS_UNDERSIZE_PKTS'], + 41: ['SAI_PORT_STAT_IP_IN_RECEIVES'], + 42: ['SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES'], + 43: ['SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES'], + 44: ['SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS'] +} + +STATUS_NA = 'N/A' + +RATES_TABLE_PREFIX = "RATES:" + +COUNTER_TABLE_PREFIX = "COUNTERS:" +COUNTERS_PORT_NAME_MAP = "COUNTERS_PORT_NAME_MAP" + +PORT_STATUS_TABLE_PREFIX = "PORT_TABLE:" 
+PORT_STATE_TABLE_PREFIX = "PORT_TABLE|" +PORT_OPER_STATUS_FIELD = "oper_status" +PORT_ADMIN_STATUS_FIELD = "admin_status" +PORT_STATUS_VALUE_UP = 'UP' +PORT_STATUS_VALUE_DOWN = 'DOWN' +PORT_SPEED_FIELD = "speed" + +PORT_STATE_UP = 'U' +PORT_STATE_DOWN = 'D' +PORT_STATE_DISABLED = 'X' + +LINECARD_PORT_STAT_TABLE = 'LINECARD_PORT_STAT_TABLE' +LINECARD_PORT_STAT_MARK_TABLE = 'LINECARD_PORT_STAT_MARK_TABLE' +CHASSIS_MIDPLANE_INFO_TABLE = 'CHASSIS_MIDPLANE_TABLE' + + +class Portstat(object): + def __init__(self, namespace, display_option): + self.db = None + self.multi_asic = multi_asic_util.MultiAsic(display_option, namespace) + if device_info.is_supervisor(): + self.db = SonicV2Connector(use_unix_socket_path=False) + self.db.connect(self.db.CHASSIS_STATE_DB, False) + + def get_cnstat_dict(self): + self.cnstat_dict = OrderedDict() + self.cnstat_dict['time'] = datetime.datetime.now() + self.ratestat_dict = OrderedDict() + if device_info.is_supervisor(): + self.collect_stat_from_lc() + else: + self.collect_stat() + return self.cnstat_dict, self.ratestat_dict + + def collect_stat_from_lc(self): + # Retrieve the current counter values from all LCs + + # Clear stale records + self.db.delete_all_by_pattern(self.db.CHASSIS_STATE_DB, LINECARD_PORT_STAT_TABLE + "*") + self.db.delete_all_by_pattern(self.db.CHASSIS_STATE_DB, LINECARD_PORT_STAT_MARK_TABLE + "*") + + # Check how many linecards are connected + tempdb = SonicV2Connector(use_unix_socket_path=False) + tempdb.connect(tempdb.STATE_DB, False) + linecard_midplane_keys = tempdb.keys(tempdb.STATE_DB, CHASSIS_MIDPLANE_INFO_TABLE + "*") + lc_count = 0 + if not linecard_midplane_keys: + # LC has not published its Counter which could be due to chassis_port_counter_monitor.service not running + print("No linecards are connected!") + return + else: + for key in linecard_midplane_keys: + linecard_status = tempdb.get(tempdb.STATE_DB, key, "access") + if linecard_status == "True": + lc_count += 1 + + # Notify the Linecards to
publish their counter values instantly + self.db.set(self.db.CHASSIS_STATE_DB, "GET_LINECARD_COUNTER|pull", "enable", "true") + time.sleep(2) + + # Check if all LCs have published counters + linecard_names = self.db.keys(self.db.CHASSIS_STATE_DB, LINECARD_PORT_STAT_MARK_TABLE + "*") + linecard_port_aliases = self.db.keys(self.db.CHASSIS_STATE_DB, LINECARD_PORT_STAT_TABLE + "*") + if not linecard_port_aliases: + # LC has not published its Counter which could be due to chassis_port_counter_monitor.service not running + print("Linecard Counter Table is not available.") + return + if len(linecard_names) != lc_count: + print("Not all linecards have published their counter values.") + return + + # Create the dictionaries to store the counter values + cnstat_dict = OrderedDict() + cnstat_dict['time'] = datetime.datetime.now() + ratestat_dict = OrderedDict() + + # Get the counter values from CHASSIS_STATE_DB + for key in linecard_port_aliases: + rx_ok = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_ok") + rx_bps = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_bps") + rx_pps = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_pps") + rx_util = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_util") + rx_err = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_err") + rx_drop = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_drop") + rx_ovr = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_ovr") + tx_ok = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_ok") + tx_bps = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_bps") + tx_pps = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_pps") + tx_util = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_util") + tx_err = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_err") + tx_drop = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_drop") + tx_ovr = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_ovr") + port_alias = key.split("|")[-1] + cnstat_dict[port_alias] = NStats._make([rx_ok, rx_err, rx_drop, rx_ovr, tx_ok, tx_err, tx_drop, tx_ovr] +
[STATUS_NA] * (len(NStats._fields) - 8))._asdict() + ratestat_dict[port_alias] = RateStats._make([rx_bps, rx_pps, rx_util, tx_bps, tx_pps, tx_util]) + self.cnstat_dict.update(cnstat_dict) + self.ratestat_dict.update(ratestat_dict) + + @multi_asic_util.run_on_multi_asic + def collect_stat(self): + """ + Collect the statisitics from all the asics present on the + device and store in a dict + """ + + cnstat_dict, ratestat_dict = self.get_cnstat() + self.cnstat_dict.update(cnstat_dict) + self.ratestat_dict.update(ratestat_dict) + + def get_cnstat(self): + """ + Get the counters info from database. + """ + def get_counters(port): + """ + Get the counters from specific table. + """ + fields = ["0"]*BUCKET_NUM + + _, fvs = counter_table.get(PortCounter(), port) + fvs = dict(fvs) + for pos, cntr_list in counter_bucket_dict.items(): + for counter_name in cntr_list: + if counter_name not in fvs: + fields[pos] = STATUS_NA + elif fields[pos] != STATUS_NA: + fields[pos] = str(int(fields[pos]) + int(fvs[counter_name])) + + cntr = NStats._make(fields)._asdict() + return cntr + + def get_rates(table_id): + """ + Get the rates from specific table. 
+ """ + fields = ["0", "0", "0", "0", "0", "0"] + for pos, name in enumerate(rates_key_list): + full_table_id = RATES_TABLE_PREFIX + table_id + counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, name) + if counter_data is None: + fields[pos] = STATUS_NA + elif fields[pos] != STATUS_NA: + fields[pos] = float(counter_data) + cntr = RateStats._make(fields) + return cntr + + # Get the info from database + counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + # Build a dictionary of the stats + cnstat_dict = OrderedDict() + cnstat_dict['time'] = datetime.datetime.now() + ratestat_dict = OrderedDict() + counter_table = CounterTable(self.db.get_redis_client(self.db.COUNTERS_DB)) + if counter_port_name_map is None: + return cnstat_dict, ratestat_dict + for port in natsorted(counter_port_name_map): + port_name = port.split(":")[0] + if self.multi_asic.skip_display(constants.PORT_OBJ, port_name): + continue + cnstat_dict[port] = get_counters(port) + ratestat_dict[port] = get_rates(counter_port_name_map[port]) + return cnstat_dict, ratestat_dict + + def get_port_speed(self, port_name): + """ + Get the port speed + """ + # Get speed from APPL_DB + state_db_table_id = PORT_STATE_TABLE_PREFIX + port_name + app_db_table_id = PORT_STATUS_TABLE_PREFIX + port_name + for ns in self.multi_asic.get_ns_list_based_on_options(): + self.db = multi_asic.connect_to_all_dbs_for_ns(ns) + speed = self.db.get(self.db.STATE_DB, state_db_table_id, PORT_SPEED_FIELD) + oper_status = self.db.get(self.db.APPL_DB, app_db_table_id, PORT_OPER_STATUS_FIELD) + if speed is None or speed == STATUS_NA or oper_status != "up": + speed = self.db.get(self.db.APPL_DB, app_db_table_id, PORT_SPEED_FIELD) + if speed is not None: + return int(speed) + return STATUS_NA + + def get_port_state(self, port_name): + """ + Get the port state + """ + if device_info.is_supervisor(): + self.db.connect(self.db.CHASSIS_STATE_DB, False) + return self.db.get(self.db.CHASSIS_STATE_DB, 
LINECARD_PORT_STAT_TABLE + "|" + port_name, "state") + + full_table_id = PORT_STATUS_TABLE_PREFIX + port_name + for ns in self.multi_asic.get_ns_list_based_on_options(): + self.db = multi_asic.connect_to_all_dbs_for_ns(ns) + admin_state = self.db.get(self.db.APPL_DB, full_table_id, PORT_ADMIN_STATUS_FIELD) + oper_state = self.db.get(self.db.APPL_DB, full_table_id, PORT_OPER_STATUS_FIELD) + + if admin_state is None or oper_state is None: + continue + if admin_state.upper() == PORT_STATUS_VALUE_DOWN: + return PORT_STATE_DISABLED + elif admin_state.upper() == PORT_STATUS_VALUE_UP and oper_state.upper() == PORT_STATUS_VALUE_UP: + return PORT_STATE_UP + elif admin_state.upper() == PORT_STATUS_VALUE_UP and oper_state.upper() == PORT_STATUS_VALUE_DOWN: + return PORT_STATE_DOWN + else: + return STATUS_NA + return STATUS_NA + + def cnstat_print(self, cnstat_dict, ratestat_dict, intf_list, use_json, print_all, + errors_only, fec_stats_only, rates_only, detail=False): + """ + Print the cnstat. + """ + + if intf_list and detail: + self.cnstat_intf_diff_print(cnstat_dict, {}, intf_list) + return None + + table = [] + header = None + + for key in natsorted(cnstat_dict.keys()): + if key == 'time': + continue + if intf_list and key not in intf_list: + continue + port_speed = self.get_port_speed(key) + data = cnstat_dict[key] + rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) + if print_all: + header = header_all + table.append((key, self.get_port_state(key), + format_number_with_comma(data["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(data["rx_err"]), + format_number_with_comma(data["rx_drop"]), + format_number_with_comma(data["rx_ovr"]), + format_number_with_comma(data["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if 
rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + format_number_with_comma(data["tx_err"]), + format_number_with_comma(data["tx_drop"]), + format_number_with_comma(data["tx_ovr"]))) + elif errors_only: + header = header_errors_only + table.append((key, self.get_port_state(key), + format_number_with_comma(data["rx_err"]), + format_number_with_comma(data["rx_drop"]), + format_number_with_comma(data["rx_ovr"]), + format_number_with_comma(data["tx_err"]), + format_number_with_comma(data["tx_drop"]), + format_number_with_comma(data["tx_ovr"]))) + elif fec_stats_only: + header = header_fec_only + table.append((key, self.get_port_state(key), + format_number_with_comma(data['fec_corr']), + format_number_with_comma(data['fec_uncorr']), + format_number_with_comma(data['fec_symbol_err']))) + elif rates_only: + header = header_rates_only + table.append((key, self.get_port_state(key), + format_number_with_comma(data["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(data["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util))) + else: + header = header_std + table.append((key, self.get_port_state(key), + format_number_with_comma(data["rx_ok"]), + format_brate(rates.rx_bps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(data["rx_err"]), + format_number_with_comma(data["rx_drop"]), + format_number_with_comma(data["rx_ovr"]), + format_number_with_comma(data["tx_ok"]), + format_brate(rates.tx_bps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + format_number_with_comma(data["tx_err"]), + 
format_number_with_comma(data["tx_drop"]), + format_number_with_comma(data["tx_ovr"]))) + if table: + if use_json: + print(table_as_json(table, header)) + else: + print(tabulate(table, header, tablefmt='simple', stralign='right')) + if (multi_asic.is_multi_asic() or device_info.is_chassis()) and not use_json: + print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") + + def cnstat_intf_diff_print(self, cnstat_new_dict, cnstat_old_dict, intf_list): + """ + Print the difference between two cnstat results for interface. + """ + + for key in natsorted(cnstat_new_dict.keys()): + cntr = cnstat_new_dict.get(key) + if key == 'time': + continue + + if key in cnstat_old_dict: + old_cntr = cnstat_old_dict.get(key) + else: + old_cntr = NStats._make([0] * BUCKET_NUM)._asdict() + + if intf_list and key not in intf_list: + continue + + print("Packets Received 64 Octets..................... {}".format(ns_diff(cntr['rx_64'], + old_cntr['rx_64']))) + print("Packets Received 65-127 Octets................. {}".format(ns_diff(cntr['rx_65_127'], + old_cntr['rx_65_127']))) + print("Packets Received 128-255 Octets................ {}".format(ns_diff(cntr['rx_128_255'], + old_cntr['rx_128_255']))) + print("Packets Received 256-511 Octets................ {}".format(ns_diff(cntr['rx_256_511'], + old_cntr['rx_256_511']))) + print("Packets Received 512-1023 Octets............... {}".format(ns_diff(cntr['rx_512_1023'], + old_cntr['rx_512_1023']))) + print("Packets Received 1024-1518 Octets.............. {}".format(ns_diff(cntr['rx_1024_1518'], + old_cntr['rx_1024_1518']))) + print("Packets Received 1519-2047 Octets.............. {}".format(ns_diff(cntr['rx_1519_2047'], + old_cntr['rx_1519_2047']))) + print("Packets Received 2048-4095 Octets.............. {}".format(ns_diff(cntr['rx_2048_4095'], + old_cntr['rx_2048_4095']))) + print("Packets Received 4096-9216 Octets.............. 
{}".format(ns_diff(cntr['rx_4096_9216'], + old_cntr['rx_4096_9216']))) + print("Packets Received 9217-16383 Octets............. {}".format(ns_diff(cntr['rx_9217_16383'], + old_cntr['rx_9217_16383']))) + + print("") + print("Total Packets Received Without Errors.......... {}".format(ns_diff(cntr['rx_all'], + old_cntr['rx_all']))) + print("Unicast Packets Received....................... {}".format(ns_diff(cntr['rx_uca'], + old_cntr['rx_uca']))) + print("Multicast Packets Received..................... {}".format(ns_diff(cntr['rx_mca'], + old_cntr['rx_mca']))) + print("Broadcast Packets Received..................... {}".format(ns_diff(cntr['rx_bca'], + old_cntr['rx_bca']))) + + print("") + print("Jabbers Received............................... {}".format(ns_diff(cntr['rx_jbr'], + old_cntr['rx_jbr']))) + print("Fragments Received............................. {}".format(ns_diff(cntr['rx_frag'], + old_cntr['rx_frag']))) + print("Undersize Received............................. {}".format(ns_diff(cntr['rx_usize'], + old_cntr['rx_usize']))) + print("Overruns Received.............................. {}".format(ns_diff(cntr["rx_ovrrun"], + old_cntr["rx_ovrrun"]))) + + print("") + print("Packets Transmitted 64 Octets.................. {}".format(ns_diff(cntr['tx_64'], + old_cntr['tx_64']))) + print("Packets Transmitted 65-127 Octets.............. {}".format(ns_diff(cntr['tx_65_127'], + old_cntr['tx_65_127']))) + print("Packets Transmitted 128-255 Octets............. {}".format(ns_diff(cntr['tx_128_255'], + old_cntr['tx_128_255']))) + print("Packets Transmitted 256-511 Octets............. {}".format(ns_diff(cntr['tx_256_511'], + old_cntr['tx_256_511']))) + print("Packets Transmitted 512-1023 Octets............ {}".format(ns_diff(cntr['tx_512_1023'], + old_cntr['tx_512_1023']))) + print("Packets Transmitted 1024-1518 Octets........... {}".format(ns_diff(cntr['tx_1024_1518'], + old_cntr['tx_1024_1518']))) + print("Packets Transmitted 1519-2047 Octets........... 
{}".format(ns_diff(cntr['tx_1519_2047'], + old_cntr['tx_1519_2047']))) + print("Packets Transmitted 2048-4095 Octets........... {}".format(ns_diff(cntr['tx_2048_4095'], + old_cntr['tx_2048_4095']))) + print("Packets Transmitted 4096-9216 Octets........... {}".format(ns_diff(cntr['tx_4096_9216'], + old_cntr['tx_4096_9216']))) + print("Packets Transmitted 9217-16383 Octets.......... {}".format(ns_diff(cntr['tx_9217_16383'], + old_cntr['tx_9217_16383']))) + + print("") + print("Total Packets Transmitted Successfully......... {}".format(ns_diff(cntr['tx_all'], + old_cntr['tx_all']))) + print("Unicast Packets Transmitted.................... {}".format(ns_diff(cntr['tx_uca'], + old_cntr['tx_uca']))) + print("Multicast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_mca'], + old_cntr['tx_mca']))) + print("Broadcast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_bca'], + old_cntr['tx_bca']))) + + print("Time Since Counters Last Cleared............... " + str(cnstat_old_dict.get('time'))) + + def cnstat_diff_print(self, cnstat_new_dict, cnstat_old_dict, + ratestat_dict, intf_list, use_json, + print_all, errors_only, fec_stats_only, + rates_only, detail=False): + """ + Print the difference between two cnstat results. 
+ """ + + if intf_list and detail: + self.cnstat_intf_diff_print(cnstat_new_dict, cnstat_old_dict, intf_list) + return None + + table = [] + header = None + + for key in natsorted(cnstat_new_dict.keys()): + cntr = cnstat_new_dict.get(key) + if key == 'time': + continue + old_cntr = None + if key in cnstat_old_dict: + old_cntr = cnstat_old_dict.get(key) + + rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(ratestat_fields))) + + if intf_list and key not in intf_list: + continue + port_speed = self.get_port_speed(key) + + if print_all: + header = header_all + if old_cntr is not None: + table.append((key, self.get_port_state(key), + ns_diff(cntr["rx_ok"], old_cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + ns_diff(cntr["rx_err"], old_cntr["rx_err"]), + ns_diff(cntr["rx_drop"], old_cntr["rx_drop"]), + ns_diff(cntr["rx_ovr"], old_cntr["rx_ovr"]), + ns_diff(cntr["tx_ok"], old_cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + ns_diff(cntr["tx_err"], old_cntr["tx_err"]), + ns_diff(cntr["tx_drop"], old_cntr["tx_drop"]), + ns_diff(cntr["tx_ovr"], old_cntr["tx_ovr"]))) + else: + table.append((key, self.get_port_state(key), + format_number_with_comma(cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(cntr["rx_err"]), + format_number_with_comma(cntr["rx_drop"]), + format_number_with_comma(cntr["rx_ovr"]), + format_number_with_comma(cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + 
format_number_with_comma(cntr["tx_err"]), + format_number_with_comma(cntr["tx_drop"]), + format_number_with_comma(cntr["tx_ovr"]))) + elif errors_only: + header = header_errors_only + if old_cntr is not None: + table.append((key, self.get_port_state(key), + ns_diff(cntr["rx_err"], old_cntr["rx_err"]), + ns_diff(cntr["rx_drop"], old_cntr["rx_drop"]), + ns_diff(cntr["rx_ovr"], old_cntr["rx_ovr"]), + ns_diff(cntr["tx_err"], old_cntr["tx_err"]), + ns_diff(cntr["tx_drop"], old_cntr["tx_drop"]), + ns_diff(cntr["tx_ovr"], old_cntr["tx_ovr"]))) + else: + table.append((key, self.get_port_state(key), + format_number_with_comma(cntr["rx_err"]), + format_number_with_comma(cntr["rx_drop"]), + format_number_with_comma(cntr["rx_ovr"]), + format_number_with_comma(cntr["tx_err"]), + format_number_with_comma(cntr["tx_drop"]), + format_number_with_comma(cntr["tx_ovr"]))) + elif fec_stats_only: + header = header_fec_only + if old_cntr is not None: + table.append((key, self.get_port_state(key), + ns_diff(cntr['fec_corr'], old_cntr['fec_corr']), + ns_diff(cntr['fec_uncorr'], old_cntr['fec_uncorr']), + ns_diff(cntr['fec_symbol_err'], old_cntr['fec_symbol_err']))) + else: + table.append((key, self.get_port_state(key), + format_number_with_comma(cntr['fec_corr']), + format_number_with_comma(cntr['fec_uncorr']), + format_number_with_comma(cntr['fec_symbol_err']))) + + elif rates_only: + header = header_rates_only + if old_cntr is not None: + table.append((key, + self.get_port_state(key), + ns_diff(cntr["rx_ok"], old_cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + ns_diff(cntr["tx_ok"], old_cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util))) + else: + table.append((key, + self.get_port_state(key), + 
format_number_with_comma(cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util))) + else: + header = header_std + if old_cntr is not None: + table.append((key, + self.get_port_state(key), + ns_diff(cntr["rx_ok"], old_cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + ns_diff(cntr["rx_err"], old_cntr["rx_err"]), + ns_diff(cntr["rx_drop"], old_cntr["rx_drop"]), + ns_diff(cntr["rx_ovr"], old_cntr["rx_ovr"]), + ns_diff(cntr["tx_ok"], old_cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + ns_diff(cntr["tx_err"], old_cntr["tx_err"]), + ns_diff(cntr["tx_drop"], old_cntr["tx_drop"]), + ns_diff(cntr["tx_ovr"], old_cntr["tx_ovr"]))) + else: + table.append((key, + self.get_port_state(key), + format_number_with_comma(cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(cntr["rx_err"]), + format_number_with_comma(cntr["rx_drop"]), + format_number_with_comma(cntr["rx_ovr"]), + format_number_with_comma(cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + format_number_with_comma(cntr["tx_err"]), + format_number_with_comma(cntr["tx_drop"]), + format_number_with_comma(cntr["tx_ovr"]))) + if table: + if use_json: + print(table_as_json(table, header)) + else: + print(tabulate(table, 
header, tablefmt='simple', stralign='right')) + if (multi_asic.is_multi_asic() or device_info.is_chassis()) and not use_json: + print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") diff --git a/wol/__init__.py b/wol/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/wol/main.py b/wol/main.py deleted file mode 100644 index 3b569a3a4f..0000000000 --- a/wol/main.py +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/env python3 - -""" -use wol to generate and send Wake-On-LAN (WOL) "Magic Packet" to specific interface - -Usage: wol_click [OPTIONS] INTERFACE TARGET_MAC - - Generate and send Wake-On-LAN (WOL) "Magic Packet" to specific interface - -Options: - -b Use broadcast MAC address instead of target device's MAC - address as Destination MAC Address in Ethernet Frame Header. - [default: False] - -p password An optional 4 or 6 byte password, in ethernet hex format or - quad-dotted decimal [default: ] - -c count For each target MAC address, the count of magic packets to - send. count must between 1 and 5. This param must use with -i. - [default: 1] - -i interval Wait interval milliseconds between sending each magic packet. - interval must between 0 and 2000. This param must use with -c. - [default: 0] - -v Verbose output [default: False] - -h, --help Show this message and exit. 
- -Examples: - wol Ethernet10 00:11:22:33:44:55 - wol Ethernet10 00:11:22:33:44:55 -b - wol Vlan1000 00:11:22:33:44:55,11:33:55:77:99:bb -p 00:22:44:66:88:aa - wol Vlan1000 00:11:22:33:44:55,11:33:55:77:99:bb -p 192.168.1.1 -c 3 -i 2000 -""" - -import binascii -import click -import copy -import netifaces -import os -import socket -import time - -CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) -EPILOG = """\b -Examples: - wol Ethernet10 00:11:22:33:44:55 - wol Ethernet10 00:11:22:33:44:55 -b - wol Vlan1000 00:11:22:33:44:55,11:33:55:77:99:bb -p 00:22:44:66:88:aa - wol Vlan1000 00:11:22:33:44:55,11:33:55:77:99:bb -p 192.168.1.1 -c 3 -i 2000 -""" -ORDINAL_NUMBER = ["0", "1st", "2nd", "3rd", "4th", "5th"] -ETHER_TYPE_WOL = b'\x08\x42' - - -class MacAddress(object): - """ - Class to handle MAC addresses and perform operations on them. - - Attributes: - - address: bytes - """ - - def __init__(self, address: str): - """ - Constructor to instantiate the MacAddress class. - - Parameters: - - address: str - The MAC address in the format '01:23:45:67:89:AB' or '01-23-45-67-89-AB'. - - Raises: - - ValueError: - Throws an error if the provided address is not in the correct format. 
- """ - try: - self.address = binascii.unhexlify(address.replace(':', '').replace('-', '')) - except binascii.Error: - raise ValueError("invalid MAC address") - if len(self.address) != 6: - raise ValueError("invalid MAC address") - - def __str__(self): - return ":".join(["%02x" % v for v in self.address]) - - def __eq__(self, other): - return self.address == other.address - - def to_bytes(self): - return copy.copy(self.address) - - -BROADCAST_MAC = MacAddress('ff:ff:ff:ff:ff:ff') - - -def is_root(): - return os.geteuid() == 0 - - -def get_interface_operstate(interface): - with open('/sys/class/net/{}/operstate'.format(interface), 'r') as f: - return f.read().strip().lower() - - -def get_interface_mac(interface): - return MacAddress(netifaces.ifaddresses(interface)[netifaces.AF_LINK][0].get('addr')) - - -def build_magic_packet(interface, target_mac, broadcast, password): - dst_mac = BROADCAST_MAC if broadcast else target_mac - src_mac = get_interface_mac(interface) - return dst_mac.to_bytes() + src_mac.to_bytes() + ETHER_TYPE_WOL \ - + b'\xff' * 6 + target_mac.to_bytes() * 16 + password - - -def send_magic_packet(interface, target_mac, pkt, count, interval, verbose): - if verbose: - print("Sending {} magic packet to {} via interface {}".format(count, target_mac, interface)) - sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW) - sock.bind((interface, 0)) - for i in range(count): - sock.send(pkt) - if verbose: - print("{} magic packet sent to {}".format(ORDINAL_NUMBER[i + 1], target_mac)) - if i + 1 != count: - time.sleep(interval / 1000) - sock.close() - - -def validate_interface(ctx, param, value): - if value not in netifaces.interfaces(): - raise click.BadParameter("invalid SONiC interface name {}".format(value)) - if get_interface_operstate(value) != 'up': - raise click.BadParameter("interface {} is not up".format(value)) - return value - - -def parse_target_mac(ctx, param, value): - mac_list = [] - for mac in value.split(','): - try: - 
mac_list.append(MacAddress(mac)) - except ValueError: - raise click.BadParameter("invalid MAC address {}".format(mac)) - return mac_list - - -def parse_password(ctx, param, value): - if len(value) == 0: - return b'' # Empty password is valid. - elif len(value) <= 15: # The length of a valid IPv4 address is less or equal to 15. - try: - password = socket.inet_aton(value) - except OSError: - raise click.BadParameter("invalid password format") - else: # The length of a valid MAC address is 17. - try: - password = MacAddress(value).to_bytes() - except ValueError: - raise click.BadParameter("invalid password format") - if len(password) not in [4, 6]: - raise click.BadParameter("password must be 4 or 6 bytes or empty") - return password - - -def validate_count_interval(count, interval): - if count is None and interval is None: - return 1, 0 # By default, count=1 and interval=0. - if count is None or interval is None: - raise click.BadParameter("count and interval must be used together") - # The values are confirmed in valid range by click.IntRange(). - return count, interval - - -@click.command(context_settings=CONTEXT_SETTINGS, epilog=EPILOG) -@click.argument('interface', type=click.STRING, callback=validate_interface) -@click.argument('target_mac', type=click.STRING, callback=parse_target_mac) -@click.option('-b', 'broadcast', is_flag=True, show_default=True, default=False, - help="Use broadcast MAC address instead of target device's MAC address as Destination MAC Address in Ethernet Frame Header.") -@click.option('-p', 'password', type=click.STRING, show_default=True, default='', callback=parse_password, metavar='password', - help='An optional 4 or 6 byte password, in ethernet hex format or quad-dotted decimal') -@click.option('-c', 'count', type=click.IntRange(1, 5), metavar='count', show_default=True, # default=1, - help='For each target MAC address, the count of magic packets to send. count must between 1 and 5. 
This param must use with -i.') -@click.option('-i', 'interval', type=click.IntRange(0, 2000), metavar='interval', # show_default=True, default=0, - help="Wait interval milliseconds between sending each magic packet. interval must between 0 and 2000. This param must use with -c.") -@click.option('-v', 'verbose', is_flag=True, show_default=True, default=False, - help='Verbose output') -def wol(interface, target_mac, broadcast, password, count, interval, verbose): - """ - Generate and send Wake-On-LAN (WOL) "Magic Packet" to specific interface - """ - count, interval = validate_count_interval(count, interval) - - if not is_root(): - raise click.ClickException("root priviledge is required to run this script") - - for mac in target_mac: - pkt = build_magic_packet(interface, mac, broadcast, password) - try: - send_magic_packet(interface, mac, pkt, count, interval, verbose) - except Exception as e: - raise click.ClickException(f'Exception: {e}') - - -if __name__ == '__main__': - wol()