A testbed parameter file is a Python script that defines the testbed parameters as variables. Testbed files are located in the ~/testbeds folder.
# Sample testbed parameter file
LEAF01_Ports = {"s2_p1": "Ethernet32", "s2_p1_speed": 100000,
"s2_p2": "Ethernet36", "s2_p2_speed": 100000,
"s1_p1": "Ethernet0", "s1_p1_speed": 100000,
"s1_p2": "Ethernet4", "s1_p2_speed": 100000,
"l2_p1": "Ethernet16", "l2_p1_speed": 100000,
"l2_p2": "Ethernet20", "l2_p2_speed": 100000,
"ixia_p1": "Ethernet60", "ixia_p1_speed": 100000,
"ixia_p2": "Ethernet48", "ixia_p2_speed": 100000,
"port_mtu": 9100
}
LEAF01 = {"IP": "10.4.4.66",
"PROTO": "http", "REST_PORT": 6002, "SSH_PORT": 22,
"CLI_PROMPTS": params.CLI_PROMPTS,
"cliErrors": params.CLI_ERROR_REGEXP, "cliWarnings": params.CLI_WARN_REGEXP,
"ssh_user": "admin",
"ssh_passwd": "YourPaSsWoRd",
"Timeout": 30, "ports": LEAF01_Ports, "name": "MLNX-LEAF01",
"backup_cfg_file": "clean_config.json"
}
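Because a testbed file is ordinary Python, it can be imported like any other module once the framework's genlibs package is importable. A minimal sketch of loading a testbed file by path and reading a few of the variables defined above (the file name my_testbed.py and the loader itself are illustrative, not part of the framework):

# load_testbed.py - illustrative only; the framework ships its own loader
import importlib.util
from pathlib import Path

def load_testbed(path):
    """Import a testbed parameter file by path and return it as a module."""
    spec = importlib.util.spec_from_file_location(Path(path).stem, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)   # executes the file, defining LEAF01, IXIA, etc.
    return module

tb = load_testbed(str(Path.home() / "testbeds" / "my_testbed.py"))  # hypothetical file name
print(tb.LEAF01["name"], tb.LEAF01["IP"], tb.LEAF01["ports"]["s1_p1"])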

IXIA_Ports = {"s1_p1": "<port_no>", "s1_p2": "<port_no>", "s1_p3": "<port_no>",
"media": "fiber", "speed": "100G",
"port_configs": {
"<localuhd>/<8>": {"speed": "100G", "auto_negotiation": False, "rs_fec": True},IXIA_Ports = {"s1_p1": "<card_no>;<Port_no>", "s1_p2": "<card_no>;<Port_no>", "s1_p3": "<card_no>;<Port_no>",
"media": "fiber", "speed": "100G",
"port_configs": {
"<chassis_ip>;<card_no>;<Port_no>": {"speed": "100G", "auto_negotiation": False, "rs_fec": True},# Ixia Parameters #
IXIA_Ports = {"s1_p1": "8", "s1_p2": "9", "s1_p3": "10",
"s2_p1": "11", "s2_p2": "12",
"l1_p1": "8", "l1_p2": "9", "l1_p3": "10",
"l2_p1": "11", "l2_p2": "12",
"media": "fiber", "speed": "100G", "global_traffic_rate": 25,
..."""
Description: Testbed information
"""
from genlibs import params
from genlibs import const
gParams = params.GLOBAL_PARAMS
pParams = params.PLATFORM_PARAMS
# PLS. DON'T CHANGE ANYTHING FROM HERE TO TOP OF THE FILE
# =================================================
ALL_DUTS = ["LEAF01", "LEAF02", "SPINE01", "SPINE02"]
# =================================================
CLEANUP_BY_REBOOT = False
CLEANUP_BY_CFG_RELOAD = False
CFG_RELOAD_BY_REBOOT = False
CHECK_COMPATIBILITY = False
CLEANUP_BEFORE_TEST_RUN = False
NET_SERVICES_CONTAINER_NAME = "net_services"
NTP_SERVER = "10.4.5.245"
INTF_UP_WAIT_TIME = 30
REBOOT_WAIT_TIME = 300
CLI_TIMEOUT = 0
MAX_V4_ACL = 64
MAX_INGRESS_V4_ACL = 64
MAX_V6_ACL = 64
MAX_SECONDARY_SUBNET = 25
MAX_IPV4_HOST_ROUTES = 1000
MAX_IPV6_HOST_ROUTES = 1000
MAX_IPV4_PREFIX_ROUTES = 1000
MAX_IPV6_PREFIX_ROUTES = 32000
MAX_IPV4_ROUTES_PER_NEXTHOP = 256
MAX_IPV4_NEXTHOPS = 2048
TECHSUPPORT = True
TECHSUPPORT_SINCE = "hour ago"
TECHSUPPORT_TIMEOUT = 300
STRESS_AVAIL_CORES = 2 # Number of CPU cores reserved for system use; all other cores will undergo stress testing
STRESS_MEM_UTIL = 85 # Targeted percentage of total system memory to allocate for stress testing
SERVER_IP = "10.20.0.75" # IP address of the server hosting the stress-ng Docker image
SERVER_USER_ID = "oper" # User ID for SCP access to the server hosting the stress-ng Docker image
SERVER_PASSWORD = "oper@123" # Password for SCP access to securely transfer the stress-ng Docker image
SYSLOG_SRVS = {"Servers": ["10.4.5.245", "10.4.5.6"], "Log_Folder": "/var/log/sonic_logs"}
TACACS_SRVS = [{"address": "10.4.5.177", "secret_key": "T@c@csSonic123"},
{"address": "10.4.5.179", "secret_key": "T@c@csSonic123"}]
TACACS_USERS = {"admin_user": "tacadmin", "admin_passwd": "sadmin@123", "oper_user": "tacuser",
"oper_passwd": "suser@123"}
IXIA_Ports = {"l1_p1": "21", "l1_p2": "24",
"l2_p1": "22", "l2_p2": "23",
"global_traffic_rate": 80,
"media": "fiber", "speed": "100G",
"port_configs": {
"localuhd/21": {"speed": "100G", "auto_negotiation": False, "rs_fec": True},
"localuhd/22": {"speed": "100G", "auto_negotiation": False, "rs_fec": True},
"localuhd/23": {"speed": "100G", "auto_negotiation": False, "rs_fec": True},
"localuhd/24": {"speed": "100G", "auto_negotiation": False, "rs_fec": True}
}
}
IXIA = {"IP": "10.4.4.10", "username": "aviz", "password": "aviz@123", "ports": IXIA_Ports}
LOGSRV1 = {"IP": "10.1.1.11", "SSH_PORT": 22, "ssh_user": "aviz", "ssh_passwd": "IxiaAviz2020", "Timeout": 30,
"name": "Syslog1", "CLI_PROMPTS": params.LINUX_PROMPTS}
# Linux server hosting services like NTP, Syslog, Tac_plus, etc. The avtest user is in the sudo group with passwordless sudo.
TESTSRV1 = {"IP": "10.109.9.112", "SSH_PORT": 22, "ssh_user": "avtest", "ssh_passwd": "avtest@123", "Timeout": 30,
"name": "TestSrv1", "CLI_PROMPTS": params.LINUX_PROMPTS}
SPINE01_Ports = {"l1_p1": "Ethernet0", "l1_p1_speed": 100000,
"l1_p2": "Ethernet4", "l1_p2_speed": 100000,
"l2_p1": "Ethernet24", "l2_p1_speed": 100000,
"l2_p2": "Ethernet28", "l2_p2_speed": 100000,
"s2_p1": "Ethernet32", "s2_p1_speed": 100000,
"s2_p2": "Ethernet36", "s2_p2_speed": 100000,
"port_mtu": 9100
}
SPINE01 = {"IP": "10.4.4.65",
"PROTO": "http", "REST_PORT": 9001, "SSH_PORT": 22,
"CLI_PROMPTS": params.CLI_PROMPTS,
"cliErrors": params.CLI_ERROR_REGEXP, "cliWarnings": params.CLI_WARN_REGEXP,
"ssh_user": "admin",
"ssh_passwd": "YourPaSsWoRd",
"Timeout": 30, "ports": SPINE01_Ports, "name": "MLNX-SPINE01",
"backup_cfg_file": "clean_config.json"
}
SPINE02_Ports = {"l1_p1": "Ethernet16", "l1_p1_speed": 100000,
"l1_p2": "Ethernet20", "l1_p2_speed": 100000,
"l2_p1": "Ethernet0", "l2_p1_speed": 100000,
"l2_p2": "Ethernet4", "l2_p2_speed": 100000,
"s1_p1": "Ethernet32", "s1_p1_speed": 100000,
"s1_p2": "Ethernet36", "s1_p2_speed": 100000,
"port_mtu": 9100
}
SPINE02 = {"IP": "10.4.4.67",
"PROTO": "http", "REST_PORT": 6018, "SSH_PORT": 22,
"CLI_PROMPTS": params.CLI_PROMPTS,
"cliErrors": params.CLI_ERROR_REGEXP, "cliWarnings": params.CLI_WARN_REGEXP,
"ssh_user": "admin",
"ssh_passwd": "YourPaSsWoRd",
"Timeout": 30, "ports": SPINE02_Ports, "name": "MLNX-SPINE02",
"backup_cfg_file": "clean_config.json"
}
LEAF01_Ports = {"s2_p1": "Ethernet16", "s2_p1_speed": 100000,
"s2_p2": "Ethernet20", "s2_p2_speed": 100000,
"s1_p1": "Ethernet0", "s1_p1_speed": 100000,
"s1_p2": "Ethernet4", "s1_p2_speed": 100000,
"l2_p1": "Ethernet32", "l2_p1_speed": 100000,
"l2_p2": "Ethernet36", "l2_p2_speed": 100000,
# provide the breakout modes for the supported ports in the below format
# "l2_p3": "Ethernet72", "l2_p3_speed": 100000,
# "l2_p3_breakout": "1x100G[40G], 2x50G, 4x25G, 4x10G",
"ixia_p1": "Ethernet60", "ixia_p1_speed": 100000,
"ixia_p2": "Ethernet48", "ixia_p2_speed": 100000,
"port_mtu": 9100
}
LEAF01 = {"IP": "10.4.4.66",
"PROTO": "http", "REST_PORT": 6002, "SSH_PORT": 22,
"CLI_PROMPTS": params.CLI_PROMPTS,
"cliErrors": params.CLI_ERROR_REGEXP, "cliWarnings": params.CLI_WARN_REGEXP,
"ssh_user": "admin",
"ssh_passwd": "YourPaSsWoRd",
"Timeout": 30, "ports": LEAF01_Ports, "name": "MLNX-LEAF01",
"backup_cfg_file": "clean_config.json"
}
LEAF02_Ports = {"s2_p1": "Ethernet0", "s2_p1_speed": 100000,
"s2_p2": "Ethernet4", "s2_p2_speed": 100000,
"s1_p1": "Ethernet24", "s1_p1_speed": 100000,
"s1_p2": "Ethernet28", "s1_p2_speed": 100000,
"l1_p1": "Ethernet32", "l1_p1_speed": 100000,
"l1_p2": "Ethernet36", "l1_p2_speed": 100000,
# provide the breakout modes for the supported ports in the below format
# "l1_p3": "Ethernet72", "l1_p3_speed": 100000,
# "l1_p3_breakout": "1x100G[40G], 2x50G, 4x25G, 4x10G",
"ixia_p1": "Ethernet60", "ixia_p1_speed": 100000,
"ixia_p2": "Ethernet48", "ixia_p2_speed": 100000,
"port_mtu": 9100
}
LEAF02 = {"IP": "10.4.4.68",
"PROTO": "http", "REST_PORT": 6002, "SSH_PORT": 22,
"CLI_PROMPTS": params.CLI_PROMPTS,
"cliErrors": params.CLI_ERROR_REGEXP, "cliWarnings": params.CLI_WARN_REGEXP,
"ssh_user": "admin",
"ssh_passwd": "YourPaSsWoRd",
"Timeout": 30, "ports": LEAF02_Ports, "name": "MLNX-LEAF02",
"backup_cfg_file": "clean_config.json"
}"""
Description: Testbed information
"""
from genlibs import params
from genlibs import const
gParams = params.GLOBAL_PARAMS
pParams = params.PLATFORM_PARAMS
# =================================================
ALL_DUTS = ['SPINE01', 'SPINE02']
# =================================================
CLEANUP_BY_REBOOT = False
CLEANUP_BY_CFG_RELOAD = False
CFG_RELOAD_BY_REBOOT = False
CHECK_COMPATIBILITY = False
CLEANUP_BEFORE_TEST_RUN = False
NTP_SERVER = "10.4.5.4"
INTF_UP_WAIT_TIME = 30
REBOOT_WAIT_TIME = 120
CLI_TIMEOUT = 0
MAX_V4_ACL = 64
MAX_V6_ACL = 64
MAX_SECONDARY_SUBNET = 25
MAX_IPV4_HOST_ROUTES = 1000
MAX_IPV6_HOST_ROUTES = 1000
MAX_IPV4_PREFIX_ROUTES = 1000
MAX_IPV4_NEXTHOPS = 256
ZTP_PARAMS = {"ZTP_HTTP_SRV_ADDR": "10.4.5.177", "ZTP_HTTP_SRV_PORT": "8090", "ZTP_FOLDER": "/home/oper/reports/ztp",
"DHCP_CONTAINER": "ztp_dhcp"}
NET_SERVICES_CONTAINER_NAME = "net_services"
SYSLOG_SRVS = {"Servers": ["<SYSLOG server IP1>", "<SYSLOG server IP2>"], 'Log_Folder': "/var/log/sonic_logs"}
TACACS_SRVS = [{"address": "<IP address1>", "secret_key": "T@c@csSonic123"},
{"address": "<IP address2>", "secret_key": "T@c@csSonic123"}]
TACACS_USERS = {"admin_user": "tacadmin", "admin_passwd": "sadmin@123",
"oper_user": "tacuser", "oper_passwd": "suser@123"}
# Ixia Parameters #
IXIA_Ports = {"s1_p1": "8", "s1_p2": "9", "s1_p3": "10",
"s2_p1": "11", "s2_p2": "12",
"l1_p1": "8", "l1_p2": "9", "l1_p3": "10",
"l2_p1": "11", "l2_p2": "12",
"media": "fiber", "speed": "100G",
"port_configs": {
"localuhd/8": {"speed": "100G", "auto_negotiation": False, "rs_fec": True},
"localuhd/9": {"speed": "100G", "auto_negotiation": False, "rs_fec": True},
"localuhd/10": {"speed": "100G", "auto_negotiation": False, "rs_fec": True},
"localuhd/11": {"speed": "100G", "auto_negotiation": False, "rs_fec": True},
"localuhd/12": {"speed": "100G", "auto_negotiation": False, "rs_fec": True},
}}
IXIA = {"IP": "<Ixia IP>", "username": "<ix username>", "password": "<Ixia passwd>", "ports": IXIA_Ports}
# DUTs Parameters #
SPINE01_Ports = {"s2_p1": "Ethernet0", "s2_p1_speed": 100000,
"s2_p2": "Ethernet8", "s2_p2_speed": 100000,
"ixia_p1": "Ethernet232",
"ixia_p2": "Ethernet240",
"ixia_p3": "Ethernet248"}
SPINE01 = {"IP": "<SPINE1 IP>", 'SSH_PORT': 22, "CLI_PROMPTS": params.CLI_PROMPTS,
"cliErrors": params.CLI_ERROR_REGEXP,
"cliWarnings": params.CLI_WARN_REGEXP,
"climode": const.CliModes.SONiC_CLI, "ports": SPINE01_Ports,
"ssh_user": "admin", "ssh_passwd": "Innovium123",
"Timeout": 30, "name": "Spine1",
"backup_cfg_file": "clean_config.json"}
SPINE02_Ports = {"s1_p1": "Ethernet0", "s1_p1_speed": 100000,
"s1_p2": "Ethernet8", "s1_p2_speed": 100000,
"ixia_p1": "Ethernet240",
"ixia_p2": "Ethernet248"}
SPINE02 = {"IP": "<SPINE2 IP>", 'SSH_PORT': 22, "CLI_PROMPTS": params.CLI_PROMPTS,
"cliErrors": params.CLI_ERROR_REGEXP,
"cliWarnings": params.CLI_WARN_REGEXP,
"climode": const.CliModes.SONiC_CLI, "ports": SPINE02_Ports,
"ssh_user": "admin", "ssh_passwd": "Innovium123",
"Timeout": 30, "name": "Spine2",
"backup_cfg_file": "clean_config.json"}
LEAF01_Ports = {"l2_p1": "Ethernet32", "l2_p1_speed": 100000,
"l2_p2": "Ethernet40", "l2_p2_speed": 100000,
"ixia_p1": "Ethernet240",
"ixia_p2": "Ethernet248"}
LEAF01 = {"IP": "<SPINE1 IP>", 'SSH_PORT': 22, "CLI_PROMPTS": params.CLI_PROMPTS,
"cliErrors": params.CLI_ERROR_REGEXP,
"cliWarnings": params.CLI_WARN_REGEXP,
"climode": const.CliModes.SONiC_CLI, "ports": LEAF01_Ports,
"ssh_user": "admin", "ssh_passwd": "Innovium123",
"Timeout": 30, "name": "Leaf1",
"backup_cfg_file": "clean_config.json"}
LEAF02_Ports = {"l1_p1": "Ethernet32", "l1_p1_speed": 100000,
"l1_p2": "Ethernet40", "l1_p2_speed": 100000,
"ixia_p1": "Ethernet240",
"ixia_p2": "Ethernet248"}
LEAF02 = {"IP": "<SPINE2 IP>", 'SSH_PORT': 22,
"CLI_PROMPTS": params.CLI_PROMPTS,
"cliErrors": params.CLI_ERROR_REGEXP,
"cliWarnings": params.CLI_WARN_REGEXP,
"climode": const.CliModes.SONiC_CLI, "ports": LEAF02_Ports,
"ssh_user": "admin", "ssh_passwd": "Innovium123",
"Timeout": 30, "name": "Leaf2",
"backup_cfg_file": "clean_config.json"}"""
Description: Testbed information
"""
from genlibs import params
from genlibs import const
gParams = params.GLOBAL_PARAMS
pParams = params.PLATFORM_PARAMS
# =================================================
ALL_DUTS = ['LEAF01', 'LEAF02', 'SPINE01', 'SPINE02']
# =================================================
result_dir = "/home/oper/reports"
CLEANUP_BY_REBOOT = False
CLEANUP_BY_CFG_RELOAD = False
West_Ixia_Params = {"ports": [21], "ixmedia": "fiber", "ixspeed": "100G", "peer": "leaf1"}
East_Ixia_Params = {"ports": [22], "ixmedia": "fiber", "ixspeed": "100G", "peer": "leaf2"}
IXIA = {"IP": "10.4.4.10", "username": "aviz", "password": "aviz@123", "ixmedia": "fiber", "ixspeed": "100G"}
INTF_UP_WAIT_TIME = 30
REBOOT_WAIT_TIME = 300
CLI_TIMEOUT = 0
PCH_CONFIGURATION = False  # set to False to disable PCH configuration in chaos scripts; the framework default is True
TECHSUPPORT = True
TECHSUPPORT_SINCE = "hour ago"
TECHSUPPORT_TIMEOUT = 120
ACCEPTABLE_DELTA = 0.5 #Threshold for acceptable packet/frame loss percentage
CPU_MEM_THRESHOLD = 5 ## Threshold for acceptable change in CPU/memory utilization percentage
# Use a network with prefixlen = 24
MASTER_NETWORK = "172.16.1.0/24"
S1_Ports = [
# Links from Spine1 to Leaf1
{"s1": "Ethernet0", "l1": "Ethernet0", "speed": 100000,
"netinfo": {"spine_ip": "", "leaf_ip": ""}, "peer": "leaf1"},
# {"s1": "Ethernet4", "l1": "Ethernet4", "speed": 100000,
# "netinfo": {"spine_ip": "", "leaf_ip": ""}, "peer": "leaf1"},
# Links from Spine1 to Leaf2
{"s1": "Ethernet24", "l2": "Ethernet24", "speed": 100000,
"netinfo": {"spine_ip": "", "leaf_ip": ""}, "peer": "leaf2"}]
# {"s1": "Ethernet28", "l2": "Ethernet28", "speed": 100000,
# "netinfo": {"spine_ip": "", "leaf_ip": ""}, "peer": "leaf2"}]
S2_Ports = [
# Links from Spine2 to Leaf1
{"s2": "Ethernet16", "l1": "Ethernet16", "speed": 100000,
"netinfo": {"spine_ip": "", "leaf_ip": ""}, "peer": "leaf1"},
# {"s2": "Ethernet20", "l1": "Ethernet20", "speed": 100000,
# "netinfo": {"spine_ip": "", "leaf_ip": ""}, "peer": "leaf1"},
# Links from Spine2 to Leaf2
{"s2": "Ethernet0", "l2": "Ethernet0", "speed": 100000,
"netinfo": {"spine_ip": "", "leaf_ip": ""}, "peer": "leaf2"}]
# {"s2": "Ethernet4", "l2": "Ethernet4", "speed": 100000,
# "netinfo": {"spine_ip": "", "leaf_ip": ""}, "peer": "leaf2"}]
L1_Ixia_Ports = [
# Links from Leaf1 to Ixia
{"ixia": "21", "l1_ixia": "Ethernet60", "speed": 100000,
"netinfo": {"ixia_ip": "", "leaf_ip": ""},
"port_configs": {"localuhd/21": {"speed": "100G", "auto_negotiation": False, "rs_fec": True}}}]
L2_Ixia_Ports = [
# Links from Leaf1 to Ixia
{"ixia": "22", "l2_ixia": "Ethernet60", "speed": 100000,
"netinfo": {"ixia_ip": "", "leaf_ip": ""},
"port_configs": {"localuhd/22": {"speed": "100G", "auto_negotiation": False, "rs_fec": True}}}]
# {"ixia": "10", "l2_ixia": "Ethernet100", "speed": 1000000,
# "netinfo": {"ixia_ip": "", "leaf_ip": ""}}]
SPINE01 = {"IP": "10.4.4.65", 'SSH_PORT': 22,
"CLI_PROMPTS": params.CLI_PROMPTS,
"cliErrors": params.CLI_ERROR_REGEXP,
"cliWarnings": params.CLI_WARN_REGEXP,
"climode": const.CliModes.SONiC_CLI,
"ssh_user": "admin", "ssh_passwd": "YourPaSsWoRd",
"Timeout": 30, "ports": S1_Ports, "name": "Spine1",
"backup_cfg_file": "clean_config.json"}
SPINE02 = {"IP": "10.4.4.67", 'SSH_PORT': 22,
"CLI_PROMPTS": params.CLI_PROMPTS,
"cliErrors": params.CLI_ERROR_REGEXP,
"cliWarnings": params.CLI_WARN_REGEXP,
"climode": const.CliModes.SONiC_CLI,
"ssh_user": "admin", "ssh_passwd": "YourPaSsWoRd",
"Timeout": 30, "ports": S1_Ports, "name": "Spine2",
"backup_cfg_file": "clean_config.json"}
LEAF01 = {"IP": "10.4.4.66", 'SSH_PORT': 22,
"CLI_PROMPTS": params.CLI_PROMPTS,
"cliErrors": params.CLI_ERROR_REGEXP,
"cliWarnings": params.CLI_WARN_REGEXP,
"climode": const.CliModes.SONiC_CLI,
"ssh_user": "admin", "ssh_passwd": "YourPaSsWoRd",
"Timeout": 30, "ports": S1_Ports + S2_Ports, "name": "Leaf1",
"backup_cfg_file": "clean_config.json"}
LEAF02 = {"IP": "10.4.4.68", 'SSH_PORT': 22,
"CLI_PROMPTS": params.CLI_PROMPTS,
"cliErrors": params.CLI_ERROR_REGEXP,
"cliWarnings": params.CLI_WARN_REGEXP,
"climode": const.CliModes.SONiC_CLI,
"ssh_user": "admin", "ssh_passwd": "YourPaSsWoRd",
"Timeout": 30, "ports": S1_Ports + S2_Ports, "name": "Leaf2",
"backup_cfg_file": "clean_config.json"}
oper@ftasvm:~$ ./qjob.py -a show
Job_Queue: []
Queue Status: paused
oper@ftasvm:~$ virsh console <VM_domain_name>
#Example -
#sonic@sonic-39:~$ virsh console FTAS_VM01
#Connected to domain FTAS_VM01
#Escape character is ^]
#oper@ftasvm:~$ 
This category covers the validation of mandatory features and functions required for data center deployments.
This section verifies the mandatory functions for management operation in a Fabric.
sonic@sonic-39:~$ gunzip -c ftas_ones_vmi_1.1.2.qcow2.gz > ftas_ones_vmi_1.1.2.qcow2
sonic@sonic-39:~$ ls -l
total 8302936
-rw-rw-r-- 1 sonic sonic 4929683456 Feb 21 06:21 ftas_ones_vmi_1.1.2.qcow2
-rw-rw-r-- 1 sonic sonic 3572510886 Feb 21 06:20 ftas_ones_vmi_1.1.2.qcow2.gz
sonic@sonic-39:~$
oper@ftasvm:~$ ./qjob.py -h
usage: qjob.py [-h] [-a {add,remove,show,kill_job} | -S {running,paused}] [-s SUITEFILE] [-V]
Test Job Queue Submitter
optional arguments:
-h, --help show this help message and exit
-a {add,remove,show,kill_job}, --action {add,remove,show,kill_job}
add: Add job to queue; remove: remove job from queue; show: show queue; kill_job: kill running job
-S {running,paused}, --status {running,paused}
Set queue execution status. ["running" or "paused"]
-s SUITEFILE, --suitefile SUITEFILE
Yaml testsuite file to send to the execution queue
-V, --version Show FTAS VM version
oper@ftasvm:~$ ssh <username>@<mgmt ip address of the VM>
#Example -
#sonic@sonic-39:~$ ssh oper@192.168.3.37
#oper@192.168.3.37's password:
#Welcome to Ubuntu 20.04.4 LTS (GNU/Linux 5.4.0-137-generic x86_64)
# * Documentation: https://help.ubuntu.com
# * Management: https://landscape.canonical.com
# * Support: https://ubuntu.com/advantage
#This system has been minimized by removing packages and content that are
#not required on a system that users do not log into.
#To restore this content, you can run the 'unminimize' command.
#Last login: Tue Feb 21 08:24:31 2023
#oper@ftasvm:~$ 
oper@linux:~$ ./qjob.py -S running

sudo apt-get update && sudo apt-get upgrade

sudo apt install libvirt-clients libvirt-daemon-system libvirt-daemon virtinst bridge-utils qemu qemu-kvm

kvm-ok
# You should see a message like "KVM acceleration can be used"
sonic@sonic-39:~$ kvm-ok
INFO: /dev/kvm exists
KVM acceleration can be used
sonic@sonic-39:~$ sudo getent group | grep libvirt
libvirt:x:119:sonic,root
libvirt-qemu:x:64055:libvirt-qemu
libvirt-dnsmasq:x:120:
sonic@sonic-39:~$ sudo groupadd --system libvirt
groupadd: group 'libvirt' already exists
sonic@sonic-39:~$ sudo usermod -a -G libvirt $(whoami)

sudo vi /etc/libvirt/qemu.conf
# Some examples of valid values are:
#
# user = "qemu" # A user named "qemu"
# user = "+0" # Super user (uid=0)
# user = "100" # A user named "100" or a user with uid=100
#
#user = "root"
user = "<your host user>"
# The group for QEMU processes run by the system instance. It can be
# specified in a similar way to the user.
group = "libvirt"sudo systemctl stop libvirtd
sudo systemctl start libvirtdsudo systemctl status libvirtd
sonic@sonic-39:~$ sudo systemctl status libvirtd
● libvirtd.service - Virtualization daemon
Loaded: loaded (/lib/systemd/system/libvirtd.service; enabled; vendor preset: enabled)
Active: active (running) since Sat 2023-02-18 10:16:26 UTC; 27s ago
Docs: man:libvirtd(8)
https://libvirt.org
Main PID: 68774 (libvirtd)
Tasks: 33 (limit: 32768)
CGroup: /system.slice/libvirtd.service
├─54120 /usr/bin/qemu-system-x86_64 -name guest=ftas03,debug-threads=on -S -object secret,id=masterKey0,format=raw,file=/var/lib/libvirt/qemu/domain-8-ftas03/master-key.aes -machine pc-i440fx-1.5,accel
└─68774 /usr/sbin/libvirtd
Feb 18 10:16:26 sonic-39 systemd[1]: Starting Virtualization daemon...
Feb 18 10:16:26 sonic-39 systemd[1]: Started Virtualization daemon.
lines 1-13/13 (END)

sudo apt-get install virt-manager

# /etc/netplan/00-installer-config.yaml
network:
  ethernets:
    enp1s0:
      dhcp4: no
  bridges:
    br0:
      interfaces: [enp1s0]
      dhcp4: yes
      mtu: 1500
      parameters:
        stp: true
        forward-delay: 4
      dhcp6: no
  version: 2

# /etc/netplan/00-installer-config.yaml
network:
  ethernets:
    enp1s0:
      dhcp4: no
  bridges:
    br0:
      interfaces: [enp1s0]
      addresses: [172.16.1.100/24]
      gateway4: 172.16.1.1
      mtu: 1500
      nameservers:
        addresses: [8.8.8.8, 8.8.4.4]
      parameters:
        stp: true
        forward-delay: 4
      dhcp4: no
      dhcp6: no
  version: 2

sudo netplan apply

oper@ftasvm:~$ ./qjob.py -a add -s testsuites/data_2dut.suite
oper@ftasvm:~$ ./qjob.py -a add -s testsuites/data_4dut.suite
oper@ftasvm:~$ ./qjob.py -a show
Job_Queue: ['/home/oper/testsuites/data_2dut.suite', '/home/oper/testsuites/data_4dut.suite']
Queue Status: paused
oper@ftasvm:~$
oper@ftasvm:~$ ./qjob.py -a remove -s /home/oper/testsuites/data_4dut.suite
[INFO]: Test job removed /home/oper/testsuites/data_4dut.suite
oper@ftasvm:~$
oper@ftasvm:~$ ./qjob.py -a show
Job_Queue: ['/home/oper/testsuites/data_2dut.suite']
Queue Status: paused
oper@ftasvm:~$
oper@linux:~$ ./qjob.py -a kill_job
Trying to terminate running job...done
[INFO]: The queue is paused. Please update its status after your debugging
oper@linux:~$ ./qjob.py -a remove -s /home/oper/testsuites/data_2dut.suite
[INFO]: Test job removed /home/oper/testsuites/data_2dut.suite
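The add, remove, and show actions above can also be driven from a script. A small sketch that queues several suite files and then resumes the queue, using only the qjob.py options shown in its help output (the suite paths are examples):

# submit_suites.py - drives the documented qjob.py CLI; illustrative only
import subprocess

SUITES = ["testsuites/data_2dut.suite", "testsuites/data_4dut.suite"]  # example paths

def qjob(*args):
    """Run ./qjob.py with the given arguments and return its output."""
    result = subprocess.run(["./qjob.py", *args], capture_output=True, text=True, check=True)
    return result.stdout

for suite in SUITES:
    qjob("-a", "add", "-s", suite)      # queue each suite file

print(qjob("-a", "show"))               # show the resulting queue
print(qjob("-S", "running"))            # resume execution of the queue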
oper@ftasvm:~$ cd /etc/sonic/
cp config_db.json config_db.json.bak

# reset to factory default
sudo rm /etc/sonic/config_db.json
sudo config-setup factory
sudo reboot

// Sample DEVICE_METADATA
"DEVICE_METADATA": {
"localhost": {
"buffer_model": "traditional",
"default_bgp_status": "up",
"default_pfcwd_status": "disable",
"docker_routing_config_mode": "split",
"hostname": "INVM32K-02",
"hwsku": "Wistron_sw_to3200k_32x100",
"mac": "00:30:64:6f:61:ad",
"platform": "x86_64-wistron_sw_to3200k-r0",
"type": "not-provisioned"
}
}, "MGMT_INTERFACE": {
"eth0|10.4.4.65/23": {
"gwaddr": "10.4.4.1"
}
}, "Ethernet0": {
"admin_status": "down",
"alias": "etp1",
"index": "1",
"lanes": "0,1,2,3",
"mtu": "9100",
"speed": "100000"
},

sudo cp /etc/sonic/config_db.json /etc/sonic/clean_config.json

sudo config reload -y -f

service integrated-vtysh-config

vtysh
show run
config t
<remove all BGP configurations>
write memory

CLEANUP_BY_REBOOT = False
CLEANUP_BY_CFG_RELOAD = False

cd ~/reports/
rm report.txt

oper@ftasvm:~$ ls -lrth
total 44K
drwxrwxr-x 3 oper oper 4.0K Nov 18 16:54 ones
-rwxrwxr-x 1 oper oper 1.9K Jan 27 10:19 qjob.py
drwxr-xr-x 2 oper oper 4.0K Jan 31 07:26 jobs
drwxr-xr-x 2 oper oper 4.0K Feb 7 11:03 configs
-rwx------ 1 oper oper 6.2K Feb 7 11:03 jobs.py
drwxrwxr-x 2 oper oper 4.0K Feb 18 12:54 __pycache__
drwxrw-rw- 2 oper oper 4.0K Feb 18 12:54 logs
drwxr-xr-x 2 oper oper 4.0K Feb 18 14:06 testbeds
drwxr-xr-x 2 oper oper 4.0K Feb 18 14:07 testsuites
drwxrwxrwx 10 oper oper 4.0K Feb 20 04:58 reports
oper@ftasvm:~$
oper@ftasvm:~/reports/test_report_20230218_125505$ ls -lrth
total 40K
drwxr-xr-x 2 root root 4.0K Feb 18 13:04 taas_acl_test.py
-rw-r--r-- 1 root root 22K Feb 18 13:04 ftas_2duts_topo.py.html
-rw-r--r-- 1 root root 9.0K Feb 18 13:04 ftas_2dut_suite.yaml.html
oper@ftasvm:~/reports/test_report_20230218_125505$
oper@ftasvm:~$ cd reports/
oper@ftasvm:~/reports$ sudo rm -rf test_report_20230218_125505
oper@ftasvm:~$ cd logs/
oper@ftasvm:~/logs$ rm -rf jobs.log







<domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
<name>FTAS_VM01</name>
<memory unit='KiB'>4194304</memory>
<currentMemory unit='KiB'>4194304</currentMemory>
<vcpu placement='static'>4</vcpu>
<resource>
<partition>/machine</partition>
</resource>
<os>
<type arch='x86_64' machine='pc-i440fx-1.5'>hvm</type>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
</features>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='writeback'/>
<source file='/home/oper/taas_vm/taas_vm_v3.qcow2' />
<target bus='virtio' dev='vda'/>
</disk>
<serial type='pty'>
<source path='/dev/pts/3'/>
<target port='0'/>
</serial>
<!-- Management interface eth0 -->
<interface type='network'>
<model type='e1000' />
<source network='br0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x00' function='0x0'/>
</interface>
<controller type='usb' index='0'/>
<memballoon model='virtio'>
<alias name='balloon0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</memballoon>
</devices>
</domain>

<network>
<name>br0</name>
<forward mode="bridge" />
<bridge name="br0" />
</network>

# Execute the below commands to attach the VM to the Linux Bridge
sonic@sonic-39:~$ virsh net-define bridged-network.xml
sonic@sonic-39:~$ virsh net-start br0
sonic@sonic-39:~$ virsh net-autostart br0
sonic@sonic-39:~$ virsh net-list
Name State Autostart Persistent
----------------------------------------------------------
br0 active yes yes
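The same bridge attachment can also be done from Python through the libvirt bindings instead of the virsh commands above. A sketch, assuming the python3-libvirt package is installed and bridged-network.xml is the file shown earlier:

# define_bridge_net.py - libvirt-python equivalent of virsh net-define/net-start/net-autostart
import libvirt

with open("bridged-network.xml") as f:
    xml = f.read()

conn = libvirt.open("qemu:///system")     # connect to the local system hypervisor
net = conn.networkDefineXML(xml)          # virsh net-define bridged-network.xml
net.create()                              # virsh net-start br0
net.setAutostart(True)                    # virsh net-autostart br0
print(conn.listNetworks())                # active networks, should include 'br0'
conn.close()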
sonic@sonic-39:~$ virsh create <VM XML configuration file>
#sonic@sonic-39:~$ virsh create ftas.xml
#Domain FTAS_VM01 created from ftas.xml
#sonic@sonic-39:~$
sonic@sonic-39:~$ virsh list
Id Name State
----------------------------------------------------
8 FTAS_VM01 running
sonic@sonic-39:~$
sonic@sonic-39:~$ virsh console FTAS_VM01
Connected to domain FTAS_VM01
Escape character is ^]
ftasvm login:

sudo nmcli con show
oper@ftasvm:~$ sudo nmcli con show
NAME UUID TYPE DEVICE
Wired connection 1 782de6d4-3867-3c5e-95fb-061ae39e5fae ethernet eth0
oper@ftasvm:~$
# Capture the connection NAME of the eth0 device

sudo dhclient -v -r
oper@ftasvm:~$ sudo dhclient -v -r
Internet Systems Consortium DHCP Client 4.4.1
Copyright 2004-2018 Internet Systems Consortium.
All rights reserved.
For info, please visit https://www.isc.org/software/dhcp/
Listening on LPF/veth1dcacbe/b6:bc:e5:4a:7e:1f
Sending on LPF/veth1dcacbe/b6:bc:e5:4a:7e:1f
<..>
Sending on Socket/fallback
oper@ftasvm:~$ sudo nmcli con mod "Wired connection 1" ipv4.addresses <ip address>/<prefix>
#Example - sudo nmcli con mod "Wired connection 1" ipv4.addresses 192.168.0.37/24

sudo nmcli connection modify "Wired connection 1" ipv4.gateway <GW Address>
#Example - sudo nmcli connection modify "Wired connection 1" ipv4.gateway 192.168.0.1

sudo nmcli con mod "Wired connection 1" ipv4.method manual

sudo nmcli device reapply <dev_name>
#Example - sudo nmcli device reapply eth0

# verify the IP address
ip a
oper@ftasvm:~$ ip a
<..>
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 52:54:00:37:3c:5c brd ff:ff:ff:ff:ff:ff
inet 192.168.0.37/25 brd 192.168.0.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet6 fe80::70a4:9f2e:658c:4d29/64 scope link noprefixroute
valid_lft forever preferred_lft forever
<..>
oper@ftasvm:~$
#Verify IP method
oper@ftasvm:~$ sudo nmcli -f ipv4.method con show "Wired connection 1"
ipv4.method: manual
oper@ftasvm:~$
sonic@sonic-39:~$ cat /proc/sys/net/bridge/bridge-nf-call-iptables
1
sonic@sonic-39:~$ echo 0 | sudo tee /proc/sys/net/bridge/bridge-nf-call-iptables

sudo nmcli con mod "<con_name>" +ipv4.addresses <ip address>/<prefix>
#Example - sudo nmcli con mod "Wired connection 1" ipv4.addresses 192.168.0.42/24
# Reapply config
sudo nmcli device reapply <dev_name>
#Example - sudo nmcli device reapply eth0
# Show IP address to verify
ip a
# Restart docker containers so their services can listen on new IP addresses

oper@ftasvm:~$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
ztp_dhcp v1 599313a03bfb 41 hours ago 83.3MB
netservices v1 8a9c98506637 41 hours ago 259MB
oper@ftasvm:~$

# NTP configuration file:
/etc/ntp.conf
# SYSLOG Configuration file
/etc/syslog-ng/conf.d/syslog_ng.conf
# Log files location:
/var/log/sonic_logs/<IP address of devices>.log
# TACACS+ configuration
/etc/tacacs+/tac_plus.conf

oper@ftasvm:~$ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
8add12060a57 netservices:v1 "/usr/bin/supervisord" 41 hours ago Up 41 hours net_services
oper@ftasvm:~$
oper@ftasvm:~$ cd testsuites/
oper@ftasvm:~/testsuites$ ls -lrth
total 156K
-rw-rw-r-- 1 oper oper 405 Jul 4 07:57 verify_yaml.py
-rw-rw-r-- 1 oper oper 539 Jul 4 07:57 verify_testbed.suite
-rw-rw-r-- 1 oper oper 808 Jul 4 07:57 mgmt_3dut.suite
-rw-rw-r-- 1 oper oper 4.7K Jul 4 07:57 mgmt_2dut.suite
-rw-rw-r-- 1 oper oper 1.8K Jul 4 07:57 mgmt_1dut.suite
-rw-rw-r-- 1 oper oper 11K Jul 4 07:57 edgecore_9716_202111.suite
-rw-rw-r-- 1 oper oper 839 Jul 4 07:57 data_3dut.suite
-rw-rw-r-- 1 oper oper 1.8K Jul 4 07:57 data_1dut.suite
-rw-rw-r-- 1 oper oper 6.9K Jul 4 07:57 PD.suite
-rw-r--r-- 1 oper oper 12K Jul 6 16:13 wistron_6512_ecs2.0.0.suite
-rw-r--r-- 1 oper oper 12K Jul 6 16:13 wistron_3200_ecs2.0.0.suite
-rw-r--r-- 1 oper oper 12K Jul 6 16:13 nvidia_202205.suite
-rw-r--r-- 1 oper oper 6.5K Jul 6 16:13 mgmt_complete.suite
-rw-r--r-- 1 oper oper 7.7K Jul 6 16:13 edgecore_4630_202111.suite
-rw-r--r-- 1 oper oper 6.4K Jul 6 16:13 data_2dut.suite
drwxrwxr-x 2 oper oper 4.0K Jul 7 07:27 ebay
-rw-r--r-- 1 oper oper 5.9K Jul 7 07:27 PI.suite
-rw-r--r-- 1 oper oper 12K Jul 14 11:02 data_complete.suite
-rw-r--r-- 1 oper oper 4.5K Jul 16 10:37 data_4dut.suite
-rw-r--r-- 1 oper oper 8.2K Jul 17 05:58 copy_data_complete.suite
oper@ftasvm:~/testsuites$

---
"TESTBED_ROOT_FOLDER": "/home/oper/testbeds"
"TESTSCRIPT_ROOT_FOLDER": "/home/oper"
"TESTSUITE_ROOT_FOLDER": ""
"TEST_REPORT_ROOT_FOLDER": "/home/oper/reports"
"CHECK_COMPATIBILITY": true
"TEST_CONTACT": "phili@aviznetworks.com"
"TESTSUITES": {
# 2 DUT test suites/scripts
"./feature/taas_acl_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_acl_001": ""}, {"test_acl_002": ""}, {"test_acl_003": ""},{"test_acl_004": ""},{"test_acl_005": ""},{"test_acl_006": ""},
{"test_acl_007": ""},{"test_acl_008": ""},{"test_acl_009": ""},{"test_acl_010": ""},{"test_acl_011": ""},{"test_acl_012": ""}]
},
"./feature/taas_mtu_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_mtu_001": ""}]
},
"./feature/taas_mgmt_Syslog_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_syslog_002": ""},{"test_syslog_004": ""}]
},
"./feature/taas_autoneg_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_autoneg_001": ""}]
},
"./feature/taas_layer2_Vlan_ixia_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_vlan_014": ""},{"test_vlan_016": ""}]
},
"./feature/taas_layer2_LACP_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_lacp_003": ""}, {"test_lacp_005": ""}, {"test_lacp_011": ""},{"test_lacp_012": ""}]
},
"./feature/taas_layer2_LLDP_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_lldp_001": ""}, {"test_lldp_002": ""}, {"test_lldp_013": ""}]
},
"./feature/taas_layer3_ARP_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_arp_003": ""}, {"test_arp_007": ""}, {"test_arp_011": ""},{"test_arp_012": ""}]
},
"./feature/taas_layer3_IP_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_IP_001": ""}, {"test_IP_002": ""}, {"test_IP_005": ""},{"test_IP_006": ""},{"test_IP_011": ""},{"test_IP_014": ""}]
},
"./feature/taas_mgmt_Ping_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_ping_001": ""}, {"test_ping_009": ""}, {"test_ping_011": ""}, {"test_ping_013": ""}]
},
"./feature/taas_platform_Interface_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_ports_002": ""}, {"test_ports_005": ""}, {"test_ports_006": ""},{"test_ports_008": ""},{"test_ports_009_14": ""}]
},
"./scalability/taas_qual_Scale.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_10_secondary_subnet_under_vlan": ""}, {"test_20_secondary_subnet_under_vlan": ""}, {"test_max_secondary_subnet_under_vlan": ""}, {"test_v4_host_routes_scale_2k": ""},
{"test_v4_host_routes_scale_4k": ""}, {"test_v4_host_routes_scale_max_supported": ""}, {"test_v6_host_routes_scale_2k": ""}, {"test_v6_host_routes_scale_4k": ""},
{"test_v6_host_routes_scale_max_supported": ""}, {"test_v4_prefix_routes_scale_2k": ""}, {"test_v4_prefix_routes_scale_4k": ""}, {"test_v4_prefix_routes_scale_max_supported": ""},
{"test_v4_nexthops_scale_512": ""}, {"test_v4_nexthops_scale_1024": ""}, {"test_v4_nexthops_scale_max_supported": ""}, {"test_qual_scale_007": ""}, {"test_qual_scale_011": ""},
{"test_v4_acl_scale_128": ""}, {"test_v4_acl_scale_256": ""}, {"test_v4_acl_scale_512": ""}, {"test_v4_acl_scale_max_supported": ""},
{"test_qual_v6_acl_scale_128": ""}, {"test_qual_v6_acl_scale_256": ""}, {"test_qual_v6_acl_scale_512": ""},
{"test_qual_v6_acl_scale_max_supported": ""}]
},
"./feature/taas_mgmt_SSH_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_ssh_001": ""},{"test_ssh_002": ""},{"test_ssh_003": ""}]
},
"./feature/taas_layer3_bgp_netops_test.py": {
"SKIP": false,
"COMMON_TESTBED": "ftas_2duts_topo.py",
"TESTCASES": [{"test_bgp_netops_002": ""}, {"test_bgp_netops_005_006": ""}, {"test_bgp_netops_008": ""},
{"test_bgp_netops_010": ""}]
},
"./feature/taas_layer3_BGP_ixia_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_bgp_004": ""},{"test_bgp_005": ""}]
},
"./feature/taas_qual_PortCfg_test.py": {
"SKIP":false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_ports_fec_001": ""},{"test_ports_mtu_002": ""},{"test_ports_counters": ""}]
},
"./feature/taas_qual_L3_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_qual_bgp_004": ""},{"test_qual_bgp_007": ""},{"test_qual_bgp_008": ""},
{"test_qual_bgp_009": ""},{"test_qual_bgp_010": ""},{"test_qual_bgp_013": ""},{"test_qual_bgp_014": ""},
{"test_qual_bgp_015": ""},{"test_qual_bgp_016": ""},{"test_qual_bgp_017": ""},
{"test_qual_bgp_019": ""},{"test_qual_vlan1": ""},{"test_qual_vrf": ""}]
},
"./feature/taas_qual_Mgmt_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_port_span_001": ""},{"test_port_span_002": ""},{"test_port_span_003": ""},
{"test_front_panel_ports_ipv6": ""}]
},
"./feature/taas_layer2_Vlan_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{ "test_vlan_004": "" },{ "test_vlan_005": "" },{ "test_vlan_007": "" }, {"test_vlan_008": ""},{"test_vlan_011": ""}]
},
"./feature/taas_qual_Security_test.py": {
"SKIP": false,
"COMMON_TESTBED": "full_mesh_topo.py",
"TESTCASES": [{"test_qual_ip6_acl_001": ""},{"test_qual_ip6_acl_002": ""},{"test_qual_ip6_acl_003": ""},
{"test_qual_ip6_acl_004": ""},{"test_qual_ip6_acl_005": ""},{"test_qual_ip6_acl_006": ""}]
},
"./feature/taas_qual_SNMP_test.py": {
"SKIP": false,
"COMMON_TESTBED": "ftas_mesh_topo.py",
"TESTCASES": [{"test_snmp_walk_inf_admin_oper": ""}, {"test_snmp_walk_ip_inf_index": ""}, {"test_snmp_walk_ip_to_mac": ""}]
}
}
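Because a test suite file is plain YAML, it can be inspected before it is queued. A short sketch (assuming PyYAML is available) that prints the scripts and test cases a suite like the one above would run, honoring the SKIP flag:

# list_suite.py - print the scripts and testcases enabled in a suite file; illustrative only
import sys
import yaml

def list_suite(path):
    with open(path) as f:
        suite = yaml.safe_load(f)
    print("Testbed root:", suite.get("TESTBED_ROOT_FOLDER"))
    for script, cfg in suite.get("TESTSUITES", {}).items():
        if cfg.get("SKIP"):
            continue                                   # skip disabled scripts
        cases = [name for case in cfg.get("TESTCASES", []) for name in case]
        print(f"{script} ({cfg.get('COMMON_TESTBED')}): {', '.join(cases)}")

if __name__ == "__main__":
    list_suite(sys.argv[1])    # e.g. testsuites/data_2dut.suite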